-rw-r--r--Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt12
-rw-r--r--Documentation/devicetree/bindings/clock/hi3660-clock.txt42
-rw-r--r--Documentation/devicetree/bindings/mfd/hi6421.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt1
-rw-r--r--Documentation/devicetree/bindings/of/overlay_mgr.txt32
-rw-r--r--Documentation/devicetree/bindings/pci/kirin-pcie.txt50
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt43
-rw-r--r--Documentation/devicetree/bindings/serial/slave-device.txt36
-rw-r--r--Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt32
-rw-r--r--Documentation/devicetree/bindings/spi/spi_pl022.txt12
-rw-r--r--Documentation/devicetree/bindings/staging/nanohub.txt35
-rw-r--r--Documentation/devicetree/bindings/thermal/hi3660-thermal.txt16
-rw-r--r--Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt8
-rw-r--r--Documentation/devicetree/bindings/trusty/trusty-fiq.txt8
-rw-r--r--Documentation/devicetree/bindings/trusty/trusty-irq.txt67
-rw-r--r--Documentation/devicetree/bindings/trusty/trusty-smc.txt6
-rw-r--r--Documentation/devicetree/bindings/ufs/hi3660-ufs.txt58
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt5
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/boot/dts/hisilicon/Makefile1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-drm.dtsi114
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-gpu.dtsi33
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts507
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-ion.dtsi141
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-sched-energy.dtsi72
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi1358
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts275
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-sched-energy.dtsi69
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi99
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi1059
-rw-r--r--arch/arm64/boot/dts/hisilicon/hisi_3660_ipc.dtsi347
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/configs/hikey960_defconfig545
-rw-r--r--arch/arm64/configs/hikey_defconfig575
-rw-r--r--arch/arm64/include/asm/fiq_glue.h26
-rw-r--r--arch/arm64/kernel/cpufeature.c6
-rw-r--r--arch/arm64/kernel/sleep.S4
-rw-r--r--arch/arm64/kernel/smp.c93
-rw-r--r--build.config14
-rw-r--r--build.config.hikey96014
-rw-r--r--build.config.net_test9
-rwxr-xr-xbuild_test.sh21
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btwilink.c11
-rw-r--r--drivers/bluetooth/hci_ldisc.c1
-rw-r--r--drivers/bluetooth/hci_ll.c262
-rw-r--r--drivers/bluetooth/hci_serdev.c356
-rw-r--r--drivers/bluetooth/hci_uart.h4
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/clk/hisilicon/Kconfig13
-rw-r--r--drivers/clk/hisilicon/Makefile2
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c257
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c637
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c6
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpuidle/Kconfig.arm12
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-hisi.c292
-rw-r--r--drivers/devfreq/Kconfig10
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/hisi-ddr-devfreq.c329
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/hisi_asp_dma.c1025
-rw-r--r--drivers/dma/k3dma.c12
-rw-r--r--drivers/gpu/Makefile3
-rw-r--r--drivers/gpu/arm/Kconfig1
-rw-r--r--drivers/gpu/arm/Makefile1
-rw-r--r--drivers/gpu/arm/utgard/.gitignore2
-rw-r--r--drivers/gpu/arm/utgard/Kbuild243
-rw-r--r--drivers/gpu/arm/utgard/Kconfig129
-rw-r--r--drivers/gpu/arm/utgard/Makefile206
-rw-r--r--drivers/gpu/arm/utgard/common/mali_broadcast.c142
-rw-r--r--drivers/gpu/arm/utgard/common/mali_broadcast.h57
-rw-r--r--drivers/gpu/arm/utgard/common/mali_control_timer.c128
-rw-r--r--drivers/gpu/arm/utgard/common/mali_control_timer.h28
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dlbu.c213
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dlbu.h45
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dvfs_policy.c308
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dvfs_policy.h34
-rw-r--r--drivers/gpu/arm/utgard/common/mali_executor.c2693
-rw-r--r--drivers/gpu/arm/utgard/common/mali_executor.h102
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp.c357
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp.h127
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp_job.c302
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp_job.h324
-rw-r--r--drivers/gpu/arm/utgard/common/mali_group.c1865
-rw-r--r--drivers/gpu/arm/utgard/common/mali_group.h460
-rw-r--r--drivers/gpu/arm/utgard/common/mali_hw_core.c47
-rw-r--r--drivers/gpu/arm/utgard/common/mali_hw_core.h111
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_common.h181
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_core.c1339
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_core.h57
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_utilization.c440
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_utilization.h72
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_vsync.c45
-rw-r--r--drivers/gpu/arm/utgard/common/mali_l2_cache.c534
-rw-r--r--drivers/gpu/arm/utgard/common/mali_l2_cache.h124
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mem_validation.c65
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mem_validation.h19
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu.c433
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu.h124
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c495
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h110
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk.h1389
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_bitops.h162
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_list.h273
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_mali.h151
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_profiling.h146
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_types.h471
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm.c1362
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm.h91
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_domain.c209
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_domain.h104
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_metrics.c255
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_metrics.h74
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pmu.c270
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pmu.h123
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp.c502
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp.h138
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp_job.c316
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp_job.h591
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler.c1548
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler.h131
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler_types.h29
-rw-r--r--drivers/gpu/arm/utgard/common/mali_session.c155
-rw-r--r--drivers/gpu/arm/utgard/common/mali_session.h136
-rw-r--r--drivers/gpu/arm/utgard/common/mali_soft_job.c438
-rw-r--r--drivers/gpu/arm/utgard/common/mali_soft_job.h190
-rw-r--r--drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c77
-rw-r--r--drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h70
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline.c1816
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline.h561
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c213
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h67
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c179
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h51
-rw-r--r--drivers/gpu/arm/utgard/common/mali_ukk.h551
-rw-r--r--drivers/gpu/arm/utgard/common/mali_user_settings_db.c147
-rw-r--r--drivers/gpu/arm/utgard/common/mali_user_settings_db.h39
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h526
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h90
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h190
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h305
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h1090
-rw-r--r--drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h30
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_devfreq.c310
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_devfreq.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c36
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_dma_fence.c352
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_dma_fence.h109
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_internal_sync.c813
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_internal_sync.h144
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_linux.c1134
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_linux.h36
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c1410
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_linux_trace.h162
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory.c530
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory.h143
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c362
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h58
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_cow.c776
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_cow.h48
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c262
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h64
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c369
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h53
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_external.c91
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_external.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_manager.c993
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_manager.h51
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c830
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h54
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_secure.c169
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_secure.h30
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c942
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h121
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_types.h219
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_ump.c154
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_ump.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_util.c158
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_util.h20
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_virtual.c127
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_virtual.h35
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_atomics.c59
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c152
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_irq.c200
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_locks.c287
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_locks.h326
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c146
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_mali.c491
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_math.c27
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_memory.c61
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_misc.c81
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_notification.c182
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_pm.c83
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_profiling.c1282
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_specific.h72
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_time.c59
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_timers.c76
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c78
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_wq.c240
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c23
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_events.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_internal.c275
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_internal.h35
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_sync.c657
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_sync.h169
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_uk_types.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_core.c146
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_gp.c91
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_mem.c333
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_pp.c105
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c183
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c90
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c88
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c39
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h75
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm.c623
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c122
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h44
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/juno_opp.c127
-rw-r--r--drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c635
-rw-r--r--drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h66
-rw-r--r--drivers/gpu/arm/utgard/readme.txt28
-rw-r--r--drivers/gpu/arm/utgard/regs/mali_200_regs.h131
-rw-r--r--drivers/gpu/arm/utgard/regs/mali_gp_regs.h172
-rw-r--r--drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/gpu/arm_gpu/Kbuild224
-rw-r--r--drivers/gpu/arm_gpu/Kconfig263
-rw-r--r--drivers/gpu/arm_gpu/Makefile42
-rw-r--r--drivers/gpu/arm_gpu/Makefile.kbase17
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/Kbuild60
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_backend_config.h29
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.c29
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.h34
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_debug_job_fault_backend.c157
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.c413
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.h24
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_hw.c255
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_internal.h67
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpu.c123
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpuprops_backend.c110
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_backend.c492
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_defs.h58
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_internal.h45
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_internal.h39
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_linux.c469
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_as.c235
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_defs.h123
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_hw.c1514
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_internal.h164
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.c1947
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.h76
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.c303
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.h129
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_backend.c356
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_internal.h69
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.c401
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.h42
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.c63
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.h77
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_backend.c478
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.c182
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.h92
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.c129
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.h55
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.c65
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.h40
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.c70
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.h64
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_defs.h519
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.c73
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.h64
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_driver.c1671
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_internal.h548
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_metrics.c401
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.c973
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.h227
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.c103
-rw-r--r--drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.h52
-rw-r--r--drivers/gpu/arm_gpu/docs/Doxyfile126
-rw-r--r--drivers/gpu/arm_gpu/docs/policy_operation_diagram.dot112
-rw-r--r--drivers/gpu/arm_gpu/docs/policy_overview.dot63
-rw-r--r--drivers/gpu/arm_gpu/ipa/Kbuild24
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.c585
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.h165
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.c219
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.h49
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.c325
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.h40
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.c217
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.h161
-rw-r--r--drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_g71.c128
-rw-r--r--drivers/gpu/arm_gpu/mali_base_hwconfig_features.h368
-rw-r--r--drivers/gpu/arm_gpu/mali_base_hwconfig_issues.h1134
-rw-r--r--drivers/gpu/arm_gpu/mali_base_kernel.h1818
-rw-r--r--drivers/gpu/arm_gpu/mali_base_mem_priv.h52
-rw-r--r--drivers/gpu/arm_gpu/mali_base_vendor_specific_func.h24
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase.h613
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_10969_workaround.c210
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_10969_workaround.h23
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.c102
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.h45
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_cache_policy.c54
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_cache_policy.h45
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_config.c51
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_config.h345
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_config_defaults.h226
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_context.c362
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_context.h90
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_core_linux.c4875
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_ctx_sched.c203
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_ctx_sched.h131
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug.c39
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug.h164
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.c499
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.h96
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.c306
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.h25
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_defs.h1625
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_device.c674
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_disjoint_events.c76
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_dma_fence.c449
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_dma_fence.h131
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_event.c259
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_fence.c196
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_fence.h266
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_fence_defs.h51
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator.h45
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_api.c334
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_api.h219
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names.h2170
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_thex.h291
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tmix.h291
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tsix.h291
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpu_id.h129
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.c97
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.h37
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpuprops.c514
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpuprops.h84
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_gpuprops_types.h92
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hw.c492
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hw.h65
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_backend.h54
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_defs.h36
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_gpuprops.h47
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_instr.h116
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_jm.h381
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_pm.h209
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwaccess_time.h53
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_hwcnt_reader.h66
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_ioctl.h658
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_jd.c1847
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.c235
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.h39
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_jm.c131
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_jm.h110
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_js.c2819
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_js.h925
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.c301
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.h158
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_js_defs.h386
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_linux.h43
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem.c2869
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem.h1138
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_linux.c2670
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_linux.h240
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_lowlevel.h89
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_pool.c651
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.c81
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.h36
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.c121
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.h59
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs_buf_size.h33
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mmu.c2141
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mmu_hw.h123
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mmu_mode.h49
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mmu_mode_aarch64.c212
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_mmu_mode_lpae.c200
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_platform_fake.c124
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_pm.c205
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_pm.h171
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_profiling_gator_api.h40
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.c130
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.h50
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_replay.c1166
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_smc.c74
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_smc.h67
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_softjobs.c1512
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_strings.c23
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_strings.h19
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_sync.h203
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_sync_android.c537
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_sync_common.c43
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_sync_file.c339
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_tlstream.c2572
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_tlstream.h623
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_trace_defs.h264
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_trace_timeline.c236
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_trace_timeline.h363
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_trace_timeline_defs.h140
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_uku.h532
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_utility.c33
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_utility.h37
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_vinstr.c2076
-rw-r--r--drivers/gpu/arm_gpu/mali_kbase_vinstr.h155
-rw-r--r--drivers/gpu/arm_gpu/mali_linux_kbase_trace.h201
-rw-r--r--drivers/gpu/arm_gpu/mali_linux_trace.h189
-rw-r--r--drivers/gpu/arm_gpu/mali_malisw.h131
-rw-r--r--drivers/gpu/arm_gpu/mali_midg_coherency.h26
-rw-r--r--drivers/gpu/arm_gpu/mali_midg_regmap.h611
-rw-r--r--drivers/gpu/arm_gpu/mali_timeline.h396
-rw-r--r--drivers/gpu/arm_gpu/mali_uk.h141
-rw-r--r--drivers/gpu/arm_gpu/platform/Kconfig25
-rw-r--r--drivers/gpu/arm_gpu/platform/devicetree/Kbuild18
-rw-r--r--drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_devicetree.c31
-rw-r--r--drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_platform.h80
-rw-r--r--drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_runtime_pm.c122
-rw-r--r--drivers/gpu/arm_gpu/platform/hisilicon/Kbuild15
-rw-r--r--drivers/gpu/arm_gpu/platform/hisilicon/Kconfig41
-rw-r--r--drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hifeatures.h70
-rw-r--r--drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hisilicon.c512
-rw-r--r--drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_platform.h104
-rw-r--r--drivers/gpu/arm_gpu/platform/mali_kbase_platform_common.h26
-rw-r--r--drivers/gpu/arm_gpu/platform/mali_kbase_platform_fake.h38
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress/Kbuild18
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_platform.h75
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_vexpress.c85
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.c279
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.h38
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/Kbuild16
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h73
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c79
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/Kbuild18
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h75
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c83
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c71
-rw-r--r--drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h28
-rw-r--r--drivers/gpu/arm_gpu/platform_dummy/mali_ukk_os.h53
-rw-r--r--drivers/gpu/arm_gpu/protected_mode_switcher.h64
-rw-r--r--drivers/gpu/arm_gpu/sconscript92
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h16
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c213
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c47
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c1
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c7
-rw-r--r--drivers/gpu/drm/hisilicon/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/Makefile1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/Kconfig39
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/Makefile12
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/dw_drm_dsi.c1649
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/dw_dsi_reg.h145
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_dpe_reg.h3115
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.c730
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.h58
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.c379
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.h61
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dss.c696
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_drm_overlay_utils.c1241
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_fb.c94
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/kirin_fbdev.c472
-rw-r--r--drivers/gpu/drm/hisilicon/kirin960/panel/panel-hikey960-nte300nts.c402
-rw-r--r--drivers/hisi/Kconfig17
-rw-r--r--drivers/hisi/Makefile4
-rw-r--r--drivers/hisi/ap/platform/hi3660/global_ddr_map.h62
-rw-r--r--drivers/hisi/ap/platform/hi3660/mntn_public_interface.h411
-rw-r--r--drivers/hisi/ap/platform/hi3660/soc_acpu_baseaddr_interface.h324
-rw-r--r--drivers/hisi/hifi_dsp/Kconfig8
-rw-r--r--drivers/hisi/hifi_dsp/Makefile35
-rw-r--r--drivers/hisi/hifi_dsp/audio_hifi.h263
-rw-r--r--drivers/hisi/hifi_dsp/hifi_lpp.c1699
-rw-r--r--drivers/hisi/hifi_dsp/hifi_lpp.h351
-rw-r--r--drivers/hisi/hifi_dsp/hifi_om.c1741
-rw-r--r--drivers/hisi/hifi_dsp/hifi_om.h405
-rw-r--r--drivers/hisi/hifi_dsp/memcpy_opt.S21
-rw-r--r--drivers/hisi/hifi_mailbox/Kconfig3
-rw-r--r--drivers/hisi/hifi_mailbox/Makefile3
-rw-r--r--drivers/hisi/hifi_mailbox/ipcm/Kconfig23
-rw-r--r--drivers/hisi/hifi_mailbox/ipcm/Makefile5
-rw-r--r--drivers/hisi/hifi_mailbox/ipcm/bsp_drv_ipc.h125
-rw-r--r--drivers/hisi/hifi_mailbox/ipcm/bsp_ipc.c462
-rw-r--r--drivers/hisi/hifi_mailbox/ipcm/drv_comm.h188
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/Kconfig8
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/Makefile17
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox.c28
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_cfg.h646
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.c443
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.h213
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.c981
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.h170
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.c385
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.h296
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.c147
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.h48
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_platform.h118
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.c630
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.h65
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_stub.h74
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_table.c22
-rw-r--r--drivers/hisi/hifi_mailbox/mailbox/mdrv_ipc_enum.h413
-rw-r--r--drivers/hisi/mailbox/Kconfig7
-rw-r--r--drivers/hisi/mailbox/Makefile1
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/Kconfig44
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/Makefile6
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox.c1012
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox_dev.c1304
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/hisi_rproc.c410
-rw-r--r--drivers/hisi/mailbox/hisi_mailbox/hisi_rproc_test.c775
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c28
-rw-r--r--drivers/iommu/Kconfig18
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/hisi_smmu.h178
-rw-r--r--drivers/iommu/hisi_smmu_lpae.c849
-rw-r--r--drivers/iommu/hisilicon/Kconfig40
-rw-r--r--drivers/iommu/hisilicon/Makefile5
-rw-r--r--drivers/iommu/hisilicon/hisi_smmu_test.c387
-rw-r--r--drivers/iommu/ion-iommu-map.c365
-rw-r--r--drivers/irqchip/irq-gic.c8
-rw-r--r--drivers/mailbox/Kconfig6
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/hi3660-mailbox.c688
-rw-r--r--drivers/mfd/hi6421-pmic-core.c89
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/microchip/Kconfig7
-rw-r--r--drivers/misc/microchip/Makefile4
-rw-r--r--drivers/misc/microchip/hub/Kconfig6
-rw-r--r--drivers/misc/microchip/hub/Makefile1
-rw-r--r--drivers/misc/microchip/hub/hub_usb5734.c300
-rw-r--r--drivers/misc/ti-st/Kconfig8
-rw-r--r--drivers/misc/ti-st/Makefile1
-rw-r--r--drivers/misc/ti-st/st_kim.c94
-rw-r--r--drivers/misc/ti-st/st_ll.c17
-rw-r--r--drivers/misc/ti-st/tty_hci.c543
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c310
-rw-r--r--drivers/mmc/host/dw_mmc.c12
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/of/Kconfig10
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/overlay_mgr.c152
-rw-r--r--drivers/pci/host/Kconfig9
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pcie-kirin.c442
-rw-r--r--drivers/pci/host/pcie-kirin.h71
-rw-r--r--drivers/platform/Kconfig2
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/hisi/Kconfig6
-rw-r--r--drivers/platform/hisi/Makefile2
-rw-r--r--drivers/platform/hisi/hi6220_fiq_debugger.c312
-rw-r--r--drivers/power/reset/hisi-reboot.c55
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/hi6421-regulator.c7
-rw-r--r--drivers/regulator/hi6421v530-regulator.c214
-rw-r--r--drivers/reset/hisilicon/Kconfig7
-rw-r--r--drivers/reset/hisilicon/Makefile1
-rw-r--r--drivers/reset/hisilicon/reset-hi3660.c126
-rw-r--r--drivers/scsi/ufs/Kconfig8
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs-hi3660.c715
-rw-r--r--drivers/scsi/ufs/ufs-hi3660.h170
-rw-r--r--drivers/scsi/ufs/ufshci.h4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ion/Kconfig26
-rw-r--r--drivers/staging/android/ion/Makefile10
-rw-r--r--drivers/staging/android/ion/compat_ion.c137
-rw-r--r--drivers/staging/android/ion/devicetree.txt51
-rw-r--r--drivers/staging/android/ion/hisi/Makefile7
-rw-r--r--drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.c341
-rw-r--r--drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.h38
-rw-r--r--drivers/staging/android/ion/hisi/hisi_ion_dump.c69
-rw-r--r--drivers/staging/android/ion/hisi/hisi_ion_smart_pool.c346
-rw-r--r--drivers/staging/android/ion/hisi/hisi_ion_smart_pool.h42
-rw-r--r--drivers/staging/android/ion/hisi/of_hisi_ion.c413
-rw-r--r--drivers/staging/android/ion/hisilicon/Kconfig5
-rw-r--r--drivers/staging/android/ion/hisilicon/Makefile3
-rw-r--r--drivers/staging/android/ion/hisilicon/hi6220_ion.c113
-rw-r--r--drivers/staging/android/ion/hisilicon/hisi_ion.c266
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c177
-rw-r--r--drivers/staging/android/ion/ion.c703
-rw-r--r--drivers/staging/android/ion/ion.h78
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c51
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c33
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c38
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c38
-rw-r--r--drivers/staging/android/ion/ion_of.c185
-rw-r--r--drivers/staging/android/ion/ion_of.h37
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c24
-rw-r--r--drivers/staging/android/ion/ion_priv.h184
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c245
-rw-r--r--drivers/staging/android/ion/ion_test.c29
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c7
-rw-r--r--drivers/staging/android/uapi/ion.h105
-rw-r--r--drivers/staging/nanohub/Kconfig22
-rw-r--r--drivers/staging/nanohub/Makefile7
-rw-r--r--drivers/staging/nanohub/bl.c500
-rw-r--r--drivers/staging/nanohub/bl.h95
-rw-r--r--drivers/staging/nanohub/comms.c582
-rw-r--r--drivers/staging/nanohub/comms.h129
-rw-r--r--drivers/staging/nanohub/main.c1793
-rw-r--r--drivers/staging/nanohub/main.h150
-rw-r--r--drivers/staging/nanohub/spi.c549
-rw-r--r--drivers/staging/nanohub/spi.h21
-rw-r--r--drivers/thermal/Kconfig7
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/hi3660_thermal.c198
-rw-r--r--drivers/trusty/Kconfig52
-rw-r--r--drivers/trusty/Makefile13
-rw-r--r--drivers/trusty/trusty-fiq-arm.c42
-rw-r--r--drivers/trusty/trusty-fiq-arm64-glue.S59
-rw-r--r--drivers/trusty/trusty-fiq-arm64.c172
-rw-r--r--drivers/trusty/trusty-fiq.c85
-rw-r--r--drivers/trusty/trusty-fiq.h16
-rw-r--r--drivers/trusty/trusty-ipc.c1672
-rw-r--r--drivers/trusty/trusty-irq.c599
-rw-r--r--drivers/trusty/trusty-log.c274
-rw-r--r--drivers/trusty/trusty-log.h22
-rw-r--r--drivers/trusty/trusty-mem.c134
-rw-r--r--drivers/trusty/trusty-virtio.c733
-rw-r--r--drivers/trusty/trusty.c575
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/serdev/Kconfig16
-rw-r--r--drivers/tty/serdev/Makefile5
-rw-r--r--drivers/tty/serdev/core.c492
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c265
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/tty_buffer.c19
-rw-r--r--drivers/tty/tty_io.c52
-rw-r--r--drivers/tty/tty_port.c65
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile5
-rw-r--r--drivers/usb/core/buffer.c12
-rw-r--r--drivers/usb/core/hcd.c80
-rw-r--r--drivers/usb/core/usb.c18
-rw-r--r--drivers/usb/dwc2/core.c6
-rw-r--r--drivers/usb/dwc2/core.h32
-rw-r--r--drivers/usb/dwc2/core_intr.c22
-rw-r--r--drivers/usb/dwc2/gadget.c17
-rw-r--r--drivers/usb/dwc2/hcd.c102
-rw-r--r--drivers/usb/dwc2/platform.c48
-rw-r--r--drivers/usb/dwc3/Kconfig22
-rw-r--r--drivers/usb/dwc3/Makefile15
-rw-r--r--drivers/usb/dwc3/core.c533
-rw-r--r--drivers/usb/dwc3/core.h328
-rw-r--r--drivers/usb/dwc3/debug.c32
-rw-r--r--drivers/usb/dwc3/debug.h69
-rw-r--r--drivers/usb/dwc3/debugfs.c105
-rw-r--r--drivers/usb/dwc3/drd.c85
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c37
-rw-r--r--drivers/usb/dwc3/dwc3-hi3660.c310
-rw-r--r--drivers/usb/dwc3/dwc3-hisi.c1972
-rw-r--r--drivers/usb/dwc3/dwc3-hisi.h293
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c50
-rw-r--r--drivers/usb/dwc3/dwc3-otg.c362
-rw-r--r--drivers/usb/dwc3/dwc3-otg.h133
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c131
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/dwc3/ep0.c573
-rw-r--r--drivers/usb/dwc3/gadget.c991
-rw-r--r--drivers/usb/dwc3/gadget.h25
-rw-r--r--drivers/usb/dwc3/host.c101
-rw-r--r--drivers/usb/dwc3/io.h20
-rw-r--r--drivers/usb/dwc3/trace.h167
-rw-r--r--drivers/usb/host/xhci-mem.c12
-rw-r--r--drivers/usb/host/xhci-plat.c35
-rw-r--r--drivers/usb/host/xhci.c15
-rw-r--r--drivers/usb/pd/Kconfig1
-rw-r--r--drivers/usb/pd/Makefile2
-rw-r--r--drivers/usb/pd/hisi_pd.c602
-rw-r--r--drivers/usb/pd/richtek/Kconfig48
-rw-r--r--drivers/usb/pd/richtek/Makefile12
-rw-r--r--drivers/usb/pd/richtek/pd_core.c708
-rw-r--r--drivers/usb/pd/richtek/pd_dpm_core.c1450
-rw-r--r--drivers/usb/pd/richtek/pd_dpm_prv.h333
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine.c782
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_dbg.c48
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_dfp.c183
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_dr.c62
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_drs.c96
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_prs.c154
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_snk.c205
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_src.c252
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_ufp.c144
-rw-r--r--drivers/usb/pd/richtek/pd_policy_engine_vcs.c76
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt.c883
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_dbg.c49
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_drs.c172
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_prs.c259
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_snk.c514
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_src.c579
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_vcs.c189
-rw-r--r--drivers/usb/pd/richtek/pd_process_evt_vdm.c587
-rw-r--r--drivers/usb/pd/richtek/rt-regmap.c2129
-rw-r--r--drivers/usb/pd/richtek/tcpc_rt1711h.c1417
-rw-r--r--drivers/usb/pd/richtek/tcpci_alert.c374
-rw-r--r--drivers/usb/pd/richtek/tcpci_core.c634
-rw-r--r--drivers/usb/pd/richtek/tcpci_event.c800
-rw-r--r--drivers/usb/pd/richtek/tcpci_timer.c957
-rw-r--r--drivers/usb/pd/richtek/tcpci_typec.c1656
-rw-r--r--drivers/usb/pd/richtek/tcpm.c376
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--include/dsm/dsm_pub.h561
-rw-r--r--include/dt-bindings/clock/hi3660-clock.h220
-rw-r--r--include/dt-bindings/pinctrl/hisi.h15
-rw-r--r--include/linux/hisi/hisi-iommu.h13
-rw-r--r--include/linux/hisi/hisi_ion.h178
-rw-r--r--include/linux/hisi/hisi_irq_affinity.h17
-rw-r--r--include/linux/hisi/hisi_mailbox.h240
-rw-r--r--include/linux/hisi/hisi_rproc.h96
-rw-r--r--include/linux/hisi/ion-iommu.h79
-rw-r--r--include/linux/hisi/ipc_msg.h105
-rw-r--r--include/linux/hisi/log/hisi_log.h143
-rw-r--r--include/linux/hisi/rdr_pub.h303
-rw-r--r--include/linux/hisi/rdr_types.h21
-rw-r--r--include/linux/hisi/usb/hisi_pd_dev.h193
-rw-r--r--include/linux/hisi/usb/hisi_usb.h57
-rw-r--r--include/linux/hisi/usb/hub/hisi_hub.h24
-rw-r--r--include/linux/hisi/usb/pd/richtek/pd_core.h1218
-rw-r--r--include/linux/hisi/usb/pd/richtek/pd_dpm_core.h119
-rw-r--r--include/linux/hisi/usb/pd/richtek/pd_policy_engine.h421
-rw-r--r--include/linux/hisi/usb/pd/richtek/pd_process_evt.h178
-rw-r--r--include/linux/hisi/usb/pd/richtek/rt-regmap.h296
-rw-r--r--include/linux/hisi/usb/pd/richtek/rt1711h.h158
-rw-r--r--include/linux/hisi/usb/pd/richtek/std_tcpci_v10.h183
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci.h403
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci_config.h80
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci_core.h359
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci_event.h202
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci_timer.h99
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpci_typec.h52
-rw-r--r--include/linux/hisi/usb/pd/richtek/tcpm.h332
-rw-r--r--include/linux/hisi_ion.h178
-rw-r--r--include/linux/iommu.h43
-rw-r--r--include/linux/ion.h5
-rw-r--r--include/linux/mfd/hi6421-pmic.h5
-rw-r--r--include/linux/platform_data/nanohub.h26
-rw-r--r--include/linux/serdev.h291
-rw-r--r--include/linux/ti_wilink_st.h1
-rw-r--r--include/linux/trusty/sm_err.h43
-rw-r--r--include/linux/trusty/smcall.h138
-rw-r--r--include/linux/trusty/trusty.h88
-rw-r--r--include/linux/trusty/trusty_ipc.h88
-rw-r--r--include/linux/tty.h12
-rw-r--r--include/linux/usb.h1
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/net/bluetooth/hci_core.h5
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--net/bluetooth/Kconfig11
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/hci_core.c10
-rw-r--r--net/bluetooth/led.c70
-rw-r--r--net/bluetooth/led.h37
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/hisilicon/Kconfig11
-rw-r--r--sound/soc/hisilicon/Makefile2
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c628
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.h276
-rw-r--r--sound/soc/hisilicon/hisi-i2s.c435
-rw-r--r--sound/soc/hisilicon/hisi-i2s.h109
777 files changed, 201276 insertions, 2602 deletions
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index 3f81575aa6be..4a14b319f87d 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -1,5 +1,13 @@
Hisilicon Platforms Device Tree Bindings
----------------------------------------------------
+Hi3660 SoC
+Required root node properties:
+ - compatible = "hisilicon,hi3660";
+
+HiKey960 Board
+Required root node properties:
+ - compatible = "hisilicon,hi3660-hikey960", "hisilicon,hi3660";
+
Hi4511 Board
Required root node properties:
- compatible = "hisilicon,hi3620-hi4511";
@@ -8,6 +16,10 @@ Hi6220 SoC
Required root node properties:
- compatible = "hisilicon,hi6220";
+Hi3660 SoC
+Required root node properties:
+ - compatible = "hisilicon,hi3660";
+
HiKey Board
Required root node properties:
- compatible = "hisilicon,hi6220-hikey", "hisilicon,hi6220";
diff --git a/Documentation/devicetree/bindings/clock/hi3660-clock.txt b/Documentation/devicetree/bindings/clock/hi3660-clock.txt
new file mode 100644
index 000000000000..cc9b86c35758
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/hi3660-clock.txt
@@ -0,0 +1,42 @@
+* Hisilicon Hi3660 Clock Controller
+
+The Hi3660 clock controller generates and supplies clocks to various
+controllers within the Hi3660 SoC.
+
+Required Properties:
+
+- compatible: the compatible should be one of the following strings to
+ indicate the clock controller functionality.
+
+ - "hisilicon,hi3660-crgctrl"
+ - "hisilicon,hi3660-pctrl"
+ - "hisilicon,hi3660-pmuctrl"
+ - "hisilicon,hi3660-sctrl"
+ - "hisilicon,hi3660-iomcu"
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use this identifier
+to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/hi3660-clock.h>.
+
+Examples:
+ crg_ctrl: clock-controller@fff35000 {
+ compatible = "hisilicon,hi3660-crgctrl", "syscon";
+ reg = <0x0 0xfff35000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ uart0: serial@fdf02000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf02000 0x0 0x1000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_MUX_UART0>,
+ <&crg_ctrl HI3660_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/hi6421.txt b/Documentation/devicetree/bindings/mfd/hi6421.txt
index 0d5a4466a494..22da96d344a7 100644
--- a/Documentation/devicetree/bindings/mfd/hi6421.txt
+++ b/Documentation/devicetree/bindings/mfd/hi6421.txt
@@ -1,7 +1,9 @@
* HI6421 Multi-Functional Device (MFD), by HiSilicon Ltd.
Required parent device properties:
-- compatible : contains "hisilicon,hi6421-pmic";
+- compatible : One of the following chip-specific strings:
+ "hisilicon,hi6421-pmic";
+ "hisilicon,hi6421v530-pmic";
- reg : register range space of hi6421;
Supported Hi6421 sub-devices include:
diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
index df370585cbcc..8af1afcb86dc 100644
--- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
@@ -12,6 +12,7 @@ extensions to the Synopsys Designware Mobile Storage Host Controller.
Required Properties:
* compatible: should be one of the following.
+ - "hisilicon,hi3660-dw-mshc": for controllers with hi3660 specific extensions.
- "hisilicon,hi4511-dw-mshc": for controllers with hi4511 specific extensions.
- "hisilicon,hi6220-dw-mshc": for controllers with hi6220 specific extensions.
diff --git a/Documentation/devicetree/bindings/of/overlay_mgr.txt b/Documentation/devicetree/bindings/of/overlay_mgr.txt
new file mode 100644
index 000000000000..5f3ce4c6d481
--- /dev/null
+++ b/Documentation/devicetree/bindings/of/overlay_mgr.txt
@@ -0,0 +1,32 @@
+overlay_mgr
+
+Required properties:
+- compatible: "linux,overlay_manager";
+
+Optional properties:
+- hardware config nodes: child nodes whose names start with the word
+  "hardware", e.g. hardware_cfg_0
+
+The overlay to apply is selected on the kernel command line:
+overlay_mgr.overlay_dt_entry=hardware_cfg_0
+The DT contains a main overlay_mgr entry with all possible HW config
+setups, and the kernel command line option selects between them.
+
+Example:
+ overlay_mgr {
+ compatible = "linux,overlay_manager";
+ hardware_cfg_0 {
+ overlay@0 {
+ fragment@0 {
+ __overlay__ {
+ };
+ };
+ };
+ overlay@1 {
+ fragment@0 {
+ __overlay__ {
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/kirin-pcie.txt b/Documentation/devicetree/bindings/pci/kirin-pcie.txt
new file mode 100644
index 000000000000..68ffa0fbcd73
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/kirin-pcie.txt
@@ -0,0 +1,50 @@
+HiSilicon Kirin SoCs PCIe host DT description
+
+The Kirin PCIe host controller is based on the DesignWare PCIe core.
+It shares common functions with the PCIe DesignWare core driver
+and inherits the common properties defined in
+Documentation/devicetree/bindings/pci/designware-pci.txt.
+
+Additional properties are described here:
+
+Required properties
+- compatible:
+ "hisilicon,kirin960-pcie" for PCIe of Kirin960 SoC
+- reg: Should contain rc_dbi, apb, phy, config registers location and length.
+- reg-names: Must include the following entries:
+ "dbi": controller configuration registers;
+ "apb": apb Ctrl register defined by Kirin;
+ "phy": apb PHY register defined by Kirin;
+ "config": PCIe configuration space registers.
+- reset-gpios: The GPIO used to assert and deassert the PCIe PERST# signal.
+
+Optional properties:
+
+Example based on kirin960:
+
+ pcie@f4000000 {
+		compatible = "hisilicon,kirin960-pcie";
+ reg = <0x0 0xf4000000 0x0 0x1000>, <0x0 0xff3fe000 0x0 0x1000>,
+ <0x0 0xf3f20000 0x0 0x40000>, <0x0 0xF4000000 0 0x2000>;
+ reg-names = "dbi","apb","phy", "config";
+ bus-range = <0x0 0x1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x02000000 0x0 0x00000000 0x0 0xf5000000 0x0 0x2000000>;
+ num-lanes = <1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <0x0 0 0 1 &gic 0 0 0 282 4>,
+ <0x0 0 0 2 &gic 0 0 0 283 4>,
+ <0x0 0 0 3 &gic 0 0 0 284 4>,
+ <0x0 0 0 4 &gic 0 0 0 285 4>;
+ clocks = <&crg_ctrl HI3660_PCIEPHY_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_PCIEAUX>,
+ <&crg_ctrl HI3660_PCLK_GATE_PCIE_PHY>,
+ <&crg_ctrl HI3660_PCLK_GATE_PCIE_SYS>,
+ <&crg_ctrl HI3660_ACLK_GATE_PCIE>;
+ clock-names = "pcie_phy_ref", "pcie_aux",
+ "pcie_apb_phy", "pcie_apb_sys", "pcie_aclk";
+		reset-gpios = <&gpio11 1 0>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
new file mode 100644
index 000000000000..2bf3344b2a02
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
@@ -0,0 +1,43 @@
+Hisilicon System Reset Controller
+======================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+The reset controller registers are part of the system-ctl block on
+hi3660 SoC.
+
+Required properties:
+- compatible: should be
+ "hisilicon,hi3660-reset"
+- hisi,rst-syscon: phandle of the reset's syscon.
+- #reset-cells : Specifies the number of cells needed to encode a
+ reset source. The type shall be a <u32> and the value shall be 2.
+
+ Cell #1 : offset of the reset assert control
+ register from the syscon register base
+ offset + 4: deassert control register
+ offset + 8: status control register
+ Cell #2 : bit position of the reset in the reset control register
+
+Example:
+ iomcu: iomcu@ffd7e000 {
+ compatible = "hisilicon,hi3660-iomcu", "syscon";
+ reg = <0x0 0xffd7e000 0x0 0x1000>;
+ };
+
+ iomcu_rst: iomcu_rst_controller {
+ compatible = "hisilicon,hi3660-reset";
+ hisi,rst-syscon = <&iomcu>;
+ #reset-cells = <2>;
+ };
+
+Specifying reset lines connected to IP modules
+==============================================
+example:
+
+ i2c0: i2c@..... {
+ ...
+ resets = <&iomcu_rst 0x20 3>; /* offset: 0x20; bit: 3 */
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/serial/slave-device.txt b/Documentation/devicetree/bindings/serial/slave-device.txt
new file mode 100644
index 000000000000..f66037928f5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/slave-device.txt
@@ -0,0 +1,36 @@
+Serial Slave Device DT binding
+
+This documents the binding structure and common properties for serial
+attached devices. Common examples include Bluetooth, WiFi, NFC and GPS
+devices.
+
+Serial attached devices shall be a child node of the host UART device the
+slave device is attached to. It is expected that the attached device is
+the only child node of the UART device. The slave device node name shall
+reflect the generic type of device for the node.
+
+Required Properties:
+
+- compatible : A string reflecting the vendor and specific device the node
+ represents.
+
+Optional Properties:
+
+- max-speed : The maximum baud rate the device operates at. This should
+ only be present if the maximum is less than the slave device
+ can support. For example, a particular board has some signal
+ quality issue or the host processor can't support higher
+ baud rates.
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+ interrupts = <1>;
+
+ bluetooth {
+ compatible = "brcm,bcm43341-bt";
+ interrupt-parent = <&gpio>;
+ interrupts = <10>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt b/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt
new file mode 100644
index 000000000000..4be451cf4716
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt
@@ -0,0 +1,32 @@
+* Hisilicon 6210 i2s controller
+
+Required properties:
+
+- compatible: should be one of the following:
+ - "hisilicon,hi6210-i2s"
+- reg: physical base address of the i2s controller unit and length of
+ memory mapped region.
+- interrupts: should contain the i2s interrupt.
+- clocks: a list of phandle + clock-specifier pairs, one for each entry
+ in clock-names.
+- clock-names: should contain following:
+ - "dacodec"
+ - "i2s-base"
+- dmas: DMA specifiers for tx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: should be "tx" and "rx"
+- hisilicon,sysctrl-syscon: phandle to sysctrl syscon
+
+Example for the hi6210 i2s controller:
+
+i2s0: hi6210_i2s {
+ compatible = "hisilicon,hi6210-i2s";
+ reg = <0x0 0xf7118000 0x0 0x8000>; /* i2s unit */
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* 155 "DigACodec_intr"-32 */
+ clocks = <&sys_ctrl HI6220_DACODEC_PCLK>,
+ <&sys_ctrl HI6220_BBPPLL0_DIV>;
+ clock-names = "dacodec", "i2s-base";
+ dmas = <&dma0 15 &dma0 14>;
+ dma-names = "rx", "tx";
+ hisilicon,sysctrl-syscon = <&sys_ctrl>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt
index 4d1673ca8cf8..20ccb2244c59 100644
--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt
+++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt
@@ -30,9 +30,16 @@ contain the following properties.
0: SPI
1: Texas Instruments Synchronous Serial Frame Format
2: Microwire (Half Duplex)
-- pl022,com-mode : polling, interrupt or dma
+- pl022,com-mode : specifies the transfer mode:
+ 0: interrupt mode
+ 1: polling mode (default mode if property not present)
+ 2: DMA mode
- pl022,rx-level-trig : Rx FIFO watermark level
+ Set to 0 for watermark level of 1 element (default if property not
+  present), 1 for 4 elements, 2 for 8 elements, 3 for 16 elements or 4
+ for 32 elements
- pl022,tx-level-trig : Tx FIFO watermark level
+ Same values and default as for pl022,rx-level-trig
- pl022,ctrl-len : Microwire interface: Control length
- pl022,wait-state : Microwire interface: Wait state
- pl022,duplex : Microwire interface: Full/Half duplex
@@ -56,9 +63,7 @@ Example:
spi-max-frequency = <12000000>;
spi-cpol;
spi-cpha;
- pl022,hierarchy = <0>;
pl022,interface = <0>;
- pl022,slave-tx-disable;
pl022,com-mode = <0x2>;
pl022,rx-level-trig = <0>;
pl022,tx-level-trig = <0>;
@@ -67,4 +72,3 @@ Example:
pl022,duplex = <0>;
};
};
-
diff --git a/Documentation/devicetree/bindings/staging/nanohub.txt b/Documentation/devicetree/bindings/staging/nanohub.txt
new file mode 100644
index 000000000000..22ef03c312db
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/nanohub.txt
@@ -0,0 +1,35 @@
+GOOGLE nanohub sensorhub
+
+Required properties:
+- compatible : should be "nanohub"
+
+Example:
+
+&spi_7 {
+ sensorhub@0 {
+ compatible = "nanohub";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ spi-cpol;
+ spi-cpha;
+
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <66 0x2>;
+ sensorhub,nreset-gpio = <&msm_gpio 59 0>;
+ sensorhub,boot0-gpio = <&msm_gpio 60 0>;
+ sensorhub,wakeup-gpio = <&msm_gpio 65 0>;
+ sensorhub,irq1-gpio = <&msm_gpio 66 0>;
+ sensorhub,spi-cs-gpio = <&msm_gpio 43 0>;
+ sensorhub,bl-addr = <0x08000000>;
+ sensorhub,kernel-addr = <0x0800C000>;
+ sensorhub,shared-addr = <0x08020000>;
+ sensorhub,flash-banks = <0 0x08000000 0x04000>,
+ <3 0x0800C000 0x04000>,
+ <4 0x08010000 0x10000>,
+ <5 0x08020000 0x20000>;
+ sensorhub,num-flash-banks = <4>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sensorhub_ctrl_active
+ &sensorhub_int1_active
+ &sensorhub_int2_active>;
diff --git a/Documentation/devicetree/bindings/thermal/hi3660-thermal.txt b/Documentation/devicetree/bindings/thermal/hi3660-thermal.txt
new file mode 100644
index 000000000000..f3dddcfc83ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/hi3660-thermal.txt
@@ -0,0 +1,16 @@
+* Temperature Sensor on hisilicon hi3660 SoC
+
+** Required properties :
+
+- compatible: "hisilicon,thermal-hi3660".
+- reg: physical base address of thermal sensor and length of memory mapped
+ region.
+- #thermal-sensor-cells: Should be 1. See ./thermal.txt for a description.
+
+Example :
+
+ tsensor: tsensor {
+ compatible = "hisilicon,thermal-hi3660";
+ reg = <0x0 0xfff30000 0x0 0x1000>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt
new file mode 100644
index 000000000000..18329d39487e
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt
@@ -0,0 +1,8 @@
+Trusty fiq debugger interface
+
+Provides a single fiq for the fiq debugger.
+
+Required properties:
+- compatible: "android,trusty-fiq-v1-*", where * is the serial port to use.
+
+Must be a child of the node that provides fiq support ("android,trusty-fiq-v1").
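+
+Example (a minimal sketch; the "uart0" suffix and the node names are
+placeholders, not taken from a real board file):
+
+ trusty {
+ compatible = "android,trusty-smc-v1";
+
+ fiq {
+ compatible = "android,trusty-fiq-v1";
+
+ fiq-debugger {
+ compatible = "android,trusty-fiq-v1-uart0";
+ };
+ };
+ };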
diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt
new file mode 100644
index 000000000000..de810b955bc9
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt
@@ -0,0 +1,8 @@
+Trusty fiq interface
+
+Trusty provides fiq emulation.
+
+Required properties:
+- compatible: "android,trusty-fiq-v1"
+
+Must be a child of the node that provides the trusty std/fast call interface.
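+
+Example (a minimal sketch; node names are illustrative):
+
+ trusty {
+ compatible = "android,trusty-smc-v1";
+
+ fiq {
+ compatible = "android,trusty-fiq-v1";
+ };
+ };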
diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
new file mode 100644
index 000000000000..5aefeb8e536f
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
@@ -0,0 +1,67 @@
+Trusty irq interface
+
+Trusty requires non-secure irqs to be forwarded to the secure OS.
+
+Required properties:
+- compatible: "android,trusty-irq-v1"
+
+Optional properties:
+
+- interrupt-templates: an optional property that works together
+ with "interrupt-ranges" to specify the secure-side to kernel IRQ mapping.
+
+ It is a list of entries, each one of which defines a group of interrupts
+ having common properties, and has the following format:
+ < phandle irq_id_pos [templ_data]>
+ phandle - phandle of interrupt controller this template is for
+ irq_id_pos - the position of irq id in interrupt specifier array
+ for interrupt controller referenced by phandle.
+ templ_data - an array of u32 values (possibly empty) in the same
+ format as the interrupt specifier for the interrupt controller
+ referenced by phandle, but with the irq id field omitted.
+
+- interrupt-ranges: a list of entries that specifies the secure-side to
+ kernel IRQ mapping.
+
+ Each entry in the "interrupt-ranges" list has the following format:
+ <beg end templ_idx>
+ beg - first entry in this range
+ end - last entry in this range
+ templ_idx - index of entry in "interrupt-templates" property
+ that must be used as a template for all interrupts
+ in this range
+
+Example:
+{
+ gic: interrupt-controller@50041000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ ...
+ };
+ ...
+ IPI: interrupt-controller {
+ compatible = "android,CustomIPI";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ ...
+ trusty {
+ compatible = "android,trusty-smc-v1";
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ irq {
+ compatible = "android,trusty-irq-v1";
+ interrupt-templates = <&IPI 0>,
+ <&gic 1 GIC_PPI 0>,
+ <&gic 1 GIC_SPI 0>;
+ interrupt-ranges = < 0 15 0>,
+ <16 31 1>,
+ <32 223 2>;
+ };
+ };
+}
+
+Must be a child of the node that provides the trusty std/fast call interface.
diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
new file mode 100644
index 000000000000..1b39ad317c67
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
@@ -0,0 +1,6 @@
+Trusty smc interface
+
+Trusty runs in secure mode on the same (ARM) CPU(s) as the current OS.
+
+Required properties:
+- compatible: "android,trusty-smc-v1"
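+
+Example (a minimal sketch; this mirrors the node layout shown in
+trusty-irq.txt):
+
+ trusty {
+ compatible = "android,trusty-smc-v1";
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ };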
diff --git a/Documentation/devicetree/bindings/ufs/hi3660-ufs.txt b/Documentation/devicetree/bindings/ufs/hi3660-ufs.txt
new file mode 100644
index 000000000000..461afc8ef017
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/hi3660-ufs.txt
@@ -0,0 +1,58 @@
+* Hisilicon Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains one of the following -
+ "hisilicon,hi3660-ufs" for the HiSilicon UFS host controller
+ present on the Hi3660 chipset.
+- reg : should contain the UFS register address space and the UFS SYS CTRL register address space.
+- interrupt-parent : phandle of the interrupt controller
+- interrupts : the UFS controller interrupt
+- clocks : List of phandle and clock specifier pairs
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. "clk_ref", "clk_phy" is optional
+- resets : reset specifiers; one resets the clock and the other resets the controller
+- reset-names : names for the entries in the resets property
+
+Optional properties for board device:
+- ufs-hi3660-use-rate-B : specifies UFS rate-B
+- ufs-hi3660-broken-fastauto : specifies no fastauto
+- ufs-hi3660-use-HS-GEAR3 : specifies UFS HS-GEAR3
+- ufs-hi3660-use-HS-GEAR2 : specifies UFS HS-GEAR2
+- ufs-hi3660-use-HS-GEAR1 : specifies UFS HS-GEAR1
+- ufs-hi3660-broken-clk-gate-bypass : specifies no clk-gate
+- ufs-hi3660-use-one-line : specifies that UFS works with one lane (line)
+- reset-gpio : GPIO used to reset the UFS device
+
+Example:
+
+ ufs: ufs@ff3b0000 {
+ compatible = "jedec,ufs-1.1", "hisilicon,hi3660-ufs";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "clk_ref", "clk_phy";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ /* offset: 0x84; bit: 7 */
+ resets = <&crg_rst 0x84 12>,
+ <&crg_rst 0x84 7>;
+ reset-names = "rst", "assert";
+ };
+
+ &ufs {
+ ufs-hi3660-use-rate-B;
+ ufs-hi3660-broken-fastauto;
+ ufs-hi3660-use-HS-GEAR3;
+ ufs-hi3660-broken-clk-gate-bypass;
+ reset-gpio = <&gpio18 1 0>;
+ status = "okay";
+ };
+
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 2c30a5479069..dc77e64afda0 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -29,6 +29,11 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+- extcon: phandles to external connector devices. The first phandle should
+ point to the external connector that provides "USB" cable events, the
+ second to the external connector device that provides "USB-HOST" cable
+ events. If one of the external connector devices is not required, an
+ empty <0> phandle should be specified.
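+ For instance, a board with only a "USB" (device) connector might use
+ something like (the &extcon_usb phandle name is illustrative):
+ extcon = <&extcon_usb>, <0>;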
Example:
diff --git a/MAINTAINERS b/MAINTAINERS
index a419303662bf..339e24cd07a0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10613,6 +10613,14 @@ S: Maintained
F: Documentation/devicetree/bindings/serial/
F: drivers/tty/serial/
+SERIAL DEVICE BUS
+M: Rob Herring <robh@kernel.org>
+L: linux-serial@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/serial/slave-device.txt
+F: drivers/tty/serdev/
+F: include/linux/serdev.h
+
STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
L: kernel@stlinux.com
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5702e7d0f5e0..1ab7b8f75c5c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -179,6 +179,9 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
+config FIQ_GLUE
+ bool
+
config RWSEM_XCHGADD_ALGORITHM
def_bool y
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index d5f43a06b1c1..b633b5d51732 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
+dtb-$(CONFIG_ARCH_HISI) += hi3660-hikey960.dtb
dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-drm.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-drm.dtsi
new file mode 100644
index 000000000000..c7f497c8e1c3
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-drm.dtsi
@@ -0,0 +1,114 @@
+/{
+ dpe: dpe@E8600000 {
+ compatible = "hisilicon,hi3660-dpe";
+ status = "ok";
+
+ reg = <0x0 0xE8600000 0x0 0x80000>,
+ <0x0 0xFFF35000 0 0x1000>,
+ <0x0 0xFFF0A000 0 0x1000>,
+ <0x0 0xFFF31000 0 0x1000>,
+ <0x0 0xE86C0000 0 0x10000>;
+ interrupts = <0 245 4>;
+
+ clocks = <&crg_ctrl HI3660_ACLK_GATE_DSS>,
+ <&crg_ctrl HI3660_PCLK_GATE_DSS>,
+ <&crg_ctrl HI3660_CLK_GATE_EDC0>,
+ <&crg_ctrl HI3660_CLK_GATE_LDI0>,
+ <&crg_ctrl HI3660_CLK_GATE_LDI1>,
+ <&sctrl HI3660_CLK_GATE_DSS_AXI_MM>,
+ <&sctrl HI3660_PCLK_GATE_MMBUF>;
+ clock-names = "aclk_dss",
+ "pclk_dss",
+ "clk_edc0",
+ "clk_ldi0",
+ "clk_ldi1",
+ "clk_dss_axi_mm",
+ "pclk_mmbuf";
+
+ dma-coherent;
+
+ port {
+ dpe_out: endpoint {
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+
+ iommu_info {
+ start-addr = <0x8000>;
+ size = <0xbfff8000>;
+ };
+ };
+
+ dsi: dsi@E8601000 {
+ compatible = "hisilicon,hi3660-dsi";
+ status = "ok";
+
+ reg = <0 0xE8601000 0 0x7F000>,
+ <0 0xFFF35000 0 0x1000>;
+
+ clocks = <&crg_ctrl HI3660_CLK_GATE_TXDPHY0_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_TXDPHY1_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_TXDPHY0_CFG>,
+ <&crg_ctrl HI3660_CLK_GATE_TXDPHY1_CFG>,
+ <&crg_ctrl HI3660_PCLK_GATE_DSI0>,
+ <&crg_ctrl HI3660_PCLK_GATE_DSI1>;
+ clock-names = "clk_txdphy0_ref",
+ "clk_txdphy1_ref",
+ "clk_txdphy0_cfg",
+ "clk_txdphy1_cfg",
+ "pclk_dsi0",
+ "pclk_dsi1";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mux-gpio = <&gpio2 4 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&dpe_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ dsi_out0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&adv7533_in>;
+ };
+
+ dsi_out1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&panel0_in>;
+ };
+ };
+ };
+
+ panel@1 {
+ compatible = "hisilicon,mipi-hikey";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ status = "ok";
+ reg = <1>;
+ panel-width-mm = <94>;
+ panel-height-mm = <151>;
+ vdd-supply = <&ldo3>;
+ pwr-en-gpio = <&gpio27 0 0>;
+ bl-en-gpio = <&gpio27 2 0>;
+ pwm-gpio = <&gpio22 6 0>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi_out1>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-gpu.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-gpu.dtsi
new file mode 100644
index 000000000000..ee4ebc60fa9f
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-gpu.dtsi
@@ -0,0 +1,33 @@
+/{
+ gpu: mali@E82C0000 {
+ compatible = "arm,malit6xx", "arm,mali-midgard";
+ gpu_outstanding = <0x0>;
+ reg = <0x0 0xE82C0000 0x0 0x4000>;
+ interrupts = <0 258 4 0 259 4 0 260 4>;
+ interrupt-names = "JOB", "MMU", "GPU";
+ clocks = <&stub_clock HI3660_CLK_STUB_GPU>;
+ clock-names = "clk_mali";
+ ipa-model = "mali-simple-power-model";
+ operating-points = <
+ /* <frequency> <voltage>*/
+ 178000 650000
+ 400000 700000
+ 533000 800000
+ 807000 900000
+ 960000 1000000
+ 1037000 1100000
+ >;
+ cooling-min-level = <5>;
+ cooling-max-level = <0>;
+ #cooling-cells = <2>; /* min followed by max */
+
+ gpu_power_model: power_model {
+ compatible = "arm,mali-simple-power-model";
+ static-coefficient = <350>;
+ dynamic-coefficient = <5000>;
+ ts = <48020 2120 (-50) 1>;
+ thermal-zone = "cls0";
+ temp-poll-interval-ms = <100>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
new file mode 100644
index 000000000000..c7dfbd19a51e
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -0,0 +1,507 @@
+/*
+ * dts file for Hisilicon HiKey960 Development Board
+ *
+ * Copyright (C) 2016, Hisilicon Ltd.
+ *
+ */
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc. All rights reserved worldwide.
+ *
+ */
+
+/dts-v1/;
+
+#include "hi3660.dtsi"
+#include "hikey960-pinctrl.dtsi"
+#include "hi3660-gpu.dtsi"
+#include "hisi_3660_ipc.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "hi3660-drm.dtsi"
+#include "hi3660-ion.dtsi"
+
+/ {
+ model = "HiKey960";
+ compatible = "hisilicon,hi3660-hikey960", "hisilicon,hi3660";
+
+ aliases {
+ mshc1 = &dwmmc1;
+ mshc2 = &dwmmc2;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ serial5 = &uart5;
+ serial6 = &uart6;
+ };
+
+ chosen {
+ stdout-path = "serial6:115200n8";
+ };
+
+ fiq-debugger {
+ compatible = "android,irq-hi6220-uart";
+ reg = <0x0 0xfff32000 0x0 0x1000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "fiq", "signal";
+ };
+
+ uart_overlay@0 {
+ fragment@0 {
+ target-path="/soc/serial@fff32000";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+
+ overlay_mgr {
+ compatible = "linux,overlay_manager";
+ hardware_cfg_spidev0 {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@ffd68000/spidev@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_neonkey {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@ffd68000/sensorhub@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_argonkey {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@ffd68000/argonkey@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_disable_bt {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/serial@fdf01000/bluetooth";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x00400000 0x0 0xBFE00000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ fastboot_cma: fastboot-cma-mem {
+ reg = <0x0 0x16c00000 0x0 0x4000000>;
+ compatible = "shared-dma-pool";
+ hisi,cma-sec;
+ reusable;
+ };
+
+ uefi-reboot-mode {
+ reg = <0x0 0x32100000 0x0 0x00001000>;
+ no-map;
+ };
+
+ bl31 {
+ reg = <0x0 0x20200000 0x0 0x200000>;
+ no-map;
+ };
+
+ ramoops: ramoops@20A00000 {
+ compatible = "ramoops";
+ reg = <0x0 0x20A00000 0x0 0x00100000>; /* pstore/ramoops buffer */
+ record-size = <0x00020000>;
+ console-size = <0x00020000>;
+ ftrace-size = <0x00020000>;
+ };
+
+ hifi-base {
+ reg = <0x0 0x89200000 0x0 0x980000>;
+ no-map;
+ };
+
+ lpmx-core {
+ reg = <0x0 0x89b80000 0x0 0x100000>;
+ no-map;
+ };
+
+ lpmcu {
+ reg = <0x0 0x89c80000 0x0 0x40000>;
+ no-map;
+ };
+
+ hifi-data {
+ reg = <0x0 0x8B300000 0x0 0x380000>;
+ no-map;
+ };
+ };
+
+ reboot-mode-syscon@32100000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x32100000 0x0 0x00001000>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x0>;
+
+ mode-normal = <0x77665501>;
+ mode-bootloader = <0x77665500>;
+ mode-recovery = <0x77665502>;
+ };
+ };
+
+
+ pclk: apb_pclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <20000000>;
+ clock-output-names = "apb_pclk";
+ };
+
+ hifidsp {
+ compatible = "hisilicon,k3hifidsp";
+ };
+
+ keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_key_pmx_func &pwr_key_cfg_func>;
+
+ power {
+ wakeup-source;
+ gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+ label = "GPIO Power";
+ linux,code = <KEY_POWER>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user_led1 {
+ label = "user_led1";
+ /* gpio_150_user_led1 */
+ gpios = <&gpio18 6 0>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ user_led2 {
+ label = "user_led2";
+ /* gpio_151_user_led2 */
+ gpios = <&gpio18 7 0>;
+ linux,default-trigger = "mmc0";
+ };
+
+ user_led3 {
+ label = "user_led3";
+ /* gpio_189_user_led3 */
+ gpios = <&gpio23 5 0>;
+ default-state = "off";
+ };
+
+ user_led4 {
+ label = "user_led4";
+ /* gpio_190_user_led4 */
+ gpios = <&gpio23 6 0>;
+ linux,default-trigger = "cpu0";
+ };
+
+ wlan_active_led {
+ label = "wifi_active";
+ /* gpio_205_wifi_active */
+ gpios = <&gpio25 5 0>;
+ linux,default-trigger = "phy0tx";
+ default-state = "off";
+ };
+
+ bt_active_led {
+ label = "bt_active";
+ gpios = <&gpio25 7 0>;
+ /* gpio_207_user_led1 */
+ linux,default-trigger = "hci0rx";
+ default-state = "off";
+ };
+ };
+
+ pmic: pmic@fff34000 {
+ compatible = "hisilicon,hi6421v530-pmic";
+ reg = <0x0 0xfff34000 0x0 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ regulators {
+ ldo3: LDO3 { /* HDMI */
+ regulator-name = "VOUT3_1V85";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-always-on;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo9: LDO9 { /* SDCARD I/O */
+ regulator-name = "VOUT9_1V8_2V95";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ ldo11: LDO11 { /* Low Speed Connector */
+ regulator-name = "VOUT11_1V8_2V95";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ ldo15: LDO15 { /* UFS VCC */
+ regulator-name = "VOUT15_3V0";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo16: LDO16 { /* SD VDD */
+ regulator-name = "VOUT16_2V95";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <360>;
+ };
+ };
+ };
+
+ wlan_en: wlan-en-1-8v {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ /* GPIO_051_WIFI_EN */
+ gpio = <&gpio6 3 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ smmu {
+ compatible = "hisi,hisi-smmu";
+ phy_pgd_base = <0x0 0x34A78000>;
+ };
+
+ smmu_lpae {
+ compatible = "hisi,hisi-smmu-lpae";
+ status = "ok";
+ };
+
+ /* bluetooth - TI WL1837 */
+ kim {
+ compatible = "kim";
+ /*
+ * FIXME: The following is complete CRAP since
+ * the vendor driver doesn't follow the gpio
+ * binding. Passing in a magic Linux gpio number
+ * here until we fix the vendor driver.
+ */
+ /* BT_EN: GPIO_126_BT_EN */
+ nshutdown_gpio = <390>;
+ dev_name = "/dev/ttyAMA4";
+ flow_cntrl = <1>;
+ /* baud_rate = <3000000>; */
+ /* baud_rate = <230400>; ok */
+ baud_rate = <921600>;
+ /* baud_rate = <1500000>; nok */
+ };
+
+ btwilink {
+ compatible = "btwilink";
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+};
+
+&i2c0 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ adv7533: adv7533@39 {
+ status = "ok";
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ };
+};
+
+&i2c7 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+};
+
+&uart3 {
+ /* On Low speed expansion */
+ label = "LS-UART0";
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "ti,wl1837-st";
+ enable-gpios = <&gpio15 6 GPIO_ACTIVE_HIGH>;
+ max-speed = <921600>;
+ };
+};
+
+&uart6 {
+ /* On Low speed expansion */
+ label = "LS-UART1";
+ status = "okay";
+};
+
+&spi2 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+
+ spidev@0 {
+ compatible = "rohm,dh2228fv";
+ spi-max-frequency = <500000>;
+ reg = <0>;
+ status = "disabled";
+ };
+
+ sensorhub@0 {
+ compatible = "nanohub";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+ spi-cpol;
+ spi-cpha;
+
+ sensorhub,nreset-gpio = <&gpio26 3 0>; /* Fake */
+ sensorhub,boot0-gpio = <&gpio26 2 0>; /* Fake */
+ sensorhub,wakeup-gpio = <&gpio26 0 0>; /* Gpio_208 -> PB9 */
+ sensorhub,irq1-gpio = <&gpio26 1 0>; /* Gpio_209 -> PB5 */
+ sensorhub,spi-cs-gpio = <&gpio27 2 0>;
+ sensorhub,bl-addr = <0x08000000>;
+ sensorhub,kernel-addr = <0x0800C000>;
+ sensorhub,num-flash-banks = <4>;
+ sensorhub,flash-banks = <0 0x08000000 0x04000>,
+ <3 0x0800C000 0x04000>,
+ <4 0x08010000 0x10000>,
+ <5 0x08020000 0x20000>;
+ sensorhub,shared-addr = <0x08040000>;
+ sensorhub,num-shared-flash-banks = <2>;
+ sensorhub,shared-flash-banks = <6 0x08040000 0x20000>,
+ <7 0x08060000 0x20000>;
+
+ status = "disabled";
+ };
+
+ argonkey@0 {
+ compatible = "nanohub";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+
+ sensorhub,irq1-gpio = <&gpio26 0 0>; /* Gpio_208 -> PA1 */
+ sensorhub,irq2-gpio = <&gpio6 4 0>; /* Gpio_052 -> PA3 */
+ sensorhub,wakeup-gpio = <&gpio2 3 0>; /* Gpio_019 -> PA0 */
+ sensorhub,spi-cs-gpio = <&gpio27 2 0>; /* Gpio_218 */
+ sensorhub,nreset-gpio = <&gpio26 3 0>; /* Gpio_211 -> Nreset */
+ sensorhub,boot0-gpio = <&gpio5 0 0>; /* Gpio_040 -> Boot0 */
+ sensorhub,bl-addr = <0x08000000>;
+ sensorhub,kernel-addr = <0x0800C000>;
+ sensorhub,num-flash-banks = <4>;
+ sensorhub,flash-banks =
+ <0 0x08000000 0x04000>,
+ <3 0x0800C000 0x04000>,
+ <4 0x08010000 0x10000>,
+ <5 0x08020000 0x20000>;
+ sensorhub,shared-addr = <0x08040000>;
+ sensorhub,num-shared-flash-banks = <6>;
+ sensorhub,shared-flash-banks =
+ <6 0x08040000 0x20000>,
+ <7 0x08060000 0x20000>,
+ <8 0x08080000 0x20000>,
+ <9 0x080A0000 0x20000>,
+ <10 0x080C0000 0x20000>,
+ <11 0x080E0000 0x20000>;
+
+ status = "disabled";
+ };
+};
+
+&spi3 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+};
+
+&dwmmc1 {
+ vmmc-supply = <&ldo16>;
+ vqmmc-supply = <&ldo9>;
+ status = "okay";
+};
+
+&dwmmc2 { /* WIFI */
+ broken-cd;
+ /* WL_EN */
+ vmmc-supply = <&wlan_en>;
+ ti,non-removable;
+ non-removable;
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ status = "ok";
+
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1837";
+ reg = <2>; /* sdio func num */
+ /* WL_IRQ, GPIO_179_WL_WAKEUP_AP */
+ interrupt-parent = <&gpio22>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+ };
+};
+
+&ufs {
+ ufs-hi3660-use-rate-B;
+ ufs-hi3660-broken-fastauto;
+ ufs-hi3660-use-HS-GEAR3;
+ ufs-hi3660-broken-clk-gate-bypass;
+ reset-gpio = <&gpio18 1 0>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-ion.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-ion.dtsi
new file mode 100644
index 000000000000..5732ae6f74b6
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-ion.dtsi
@@ -0,0 +1,141 @@
+/ {
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ graphic_heap: graphic {
+ size = <0x0 0x1E00000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x0 0x0 0x1 0x0>;
+ compatible = "hisi_ion";
+ heap-name = "carveout_gralloc";
+ };
+
+ camera_heap: camera-mem {
+ size = <0x0 0x28000000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x1 0x0 0x1 0x0>;
+ compatible = "shared-dma-pool";
+ reusable;
+ status = "disable";
+ };
+
+ drm_misc_heap: drm-misc-mem{
+ size = <0x0 0x2C00000>;
+ compatible = "hisi_ion";
+ alloc-ranges = <0x0 0x0 0x1 0x0>;
+ heap-name = "carveout_drm_misc";
+ status = "disable";
+ };
+ hisi_cma {
+ reg = <0x0 0x60000000 0x0 0x20000000>;
+ compatible = "hisi-cma-pool";
+ reusable;
+ status = "disable";
+ };
+ };
+
+ hisi,ion {
+ compatible = "hisilicon,hisi-ion";
+ memory-region = <&camera_heap>;
+
+ iommu_info {
+ start-addr = <0x40000>;
+ size = <0xbffc0000>;
+ iova-align = <0x0 0x8000>;
+ };
+
+ linear {
+ start-addr = <0x40000>;
+ size = <0xbffc0000>;
+ page-size = <0x1000>;
+ page-align = <0x40000>;
+ };
+
+ heap_sys_user@0 {
+ heap-name = "sys_heap";
+ heap-id = <0x0>;
+ heap-base = <0x0>;
+ heap-size = <0x0>;
+ heap-type = "ion_system";
+ };
+ heap_sys_contig@1 {
+ heap-name = "sys_contig";
+ heap-id = <0x1>;
+ heap-base = <0x0>;
+ heap-size = <0x0>;
+ heap-type = "ion_system_contig";
+ };
+ heap_carveout_gralloc@2 {
+ heap-name = <&graphic_heap>;
+ heap-id = <0x2>;
+ heap-base = <0x0>;
+ heap-size = <0x0>;
+ heap-type = "ion_carveout";
+ };
+ heap_camera@6 {
+ heap-name = "camera_heap";
+ heap-id = <0x6>;
+ heap-base = <0xffffffff>;
+ heap-size = <0xffffffff>;
+ heap-type = "ion_dma_pool";
+ };
+ heap_carveout_drm_misc@14 {
+ heap-name = <&drm_misc_heap>;
+ heap-id = <0xe>;
+ heap-base = <0x0>;
+ heap-size = <0x0>;
+ heap-type = "ion_carveout";
+ status = "disable";
+ };
+ heap_sec_tui@17 {
+ heap-name = "heap_sec_tui";
+ heap-id = <0x11>;
+ heap-base = <0x0>;
+ heap-size = <0x2000000>;
+ heap-type = "ion_sec";
+
+ region-nr = <0x0 0x3>;
+ protect-id = <0x0 0x1>;
+ access-attr = <0x0 0x6d8>;
+ water-mark = <0x0 0x2000000>;
+ per-alloc-size = <0x0 0x2000000>;
+ per-bit-size = <0x0 0x1000000>;
+ status = "disable";
+ };
+ heap_sec_iris@18 {
+ heap-name = "heap_sec_iris";
+ heap-id = <0x12>;
+ heap-base = <0x0>;
+ heap-size = <0x3000000>;
+ heap-type = "ion_sec";
+
+ region-nr = <0x0 0x3>;
+ protect-id = <0x0 0x2>;
+ access-attr = <0x0 0x6d8>;
+ water-mark = <0x0 0x2000000>;
+ per-alloc-size = <0x0 0x2000000>;
+ per-bit-size = <0x0 0x1000000>;
+
+ status = "disable";
+ };
+
+ heap_sec_drm@20 {
+ heap-name = "heap_sec_drm";
+ heap-id = <0x14>;
+ heap-base = <0x0>;
+ heap-size = <0x20000000>;
+ heap-type = "ion_sec";
+
+ region-nr = <0x0 0x3>;
+ protect-id = <0x0 0x0>;
+ access-attr = <0x0 0x7de>;
+ water-mark = <0x0 0x10000000>;
+ per-alloc-size = <0x0 0x4000000>;
+ per-bit-size = <0x0 0x1000000>;
+
+ status = "disable";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-sched-energy.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-sched-energy.dtsi
new file mode 100644
index 000000000000..08b6a2bfbc43
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-sched-energy.dtsi
@@ -0,0 +1,72 @@
+/*
+ * Hi3660 specific energy cost model data. There are no unit
+ * requirements for the data. Data can be normalized to any
+ * reference point, but the normalization must be consistent.
+ * That is, one bogo-joule/watt must be the same quantity for
+ * all data, but we don't care what it is.
+ */
+
+energy-costs {
+ CPU_COST_A72: core-cost0 {
+ busy-cost-data = <
+ 390 404
+ 615 861
+ 782 1398
+ 915 2200
+ 1024 2848
+ >;
+ idle-cost-data = <
+ 18
+ 18
+ 3
+ 0
+ 0
+ >;
+ };
+ CPU_COST_A53: core-cost1 {
+ busy-cost-data = <
+ 133 87
+ 250 164
+ 351 265
+ 429 388
+ 462 502
+ >;
+ idle-cost-data = <
+ 5
+ 5
+ 0
+ 0
+ >;
+ };
+ CLUSTER_COST_A72: cluster-cost0 {
+ busy-cost-data = <
+ 390 102
+ 615 124
+ 782 221
+ 915 330
+ 1024 433
+ >;
+ idle-cost-data = <
+ 102
+ 102
+ 102
+ 102
+ 0
+ >;
+ };
+ CLUSTER_COST_A53: cluster-cost1 {
+ busy-cost-data = <
+ 133 12
+ 250 22
+ 351 36
+ 429 67
+ 462 144
+ >;
+ idle-cost-data = <
+ 12
+ 12
+ 12
+ 0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
new file mode 100644
index 000000000000..12544c3a1752
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -0,0 +1,1358 @@
+/*
+ * dts file for Hisilicon Hi3660 SoC
+ *
+ * Copyright (C) 2016, Hisilicon Ltd.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/hi3660-clock.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "hisilicon,hi3660";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ core2 {
+ cpu = <&cpu2>;
+ };
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+ core1 {
+ cpu = <&cpu5>;
+ };
+ core2 {
+ cpu = <&cpu6>;
+ };
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER0>;
+ operating-points-v2 = <&cluster0_opp>;
+ cooling-min-level = <4>;
+ cooling-max-level = <0>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <110>;
+ sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER0>;
+ operating-points-v2 = <&cluster0_opp>;
+ sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
+ };
+
+ cpu2: cpu@2 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER0>;
+ operating-points-v2 = <&cluster0_opp>;
+ sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
+ };
+
+ cpu3: cpu@3 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER0>;
+ operating-points-v2 = <&cluster0_opp>;
+ sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
+ };
+
+ cpu4: cpu@100 {
+ compatible = "arm,cortex-a73", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&A73_L2>;
+ cpu-idle-states = <
+ &CPU_NAP
+ &CPU_SLEEP
+ &CLUSTER_SLEEP_1
+ >;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER1>;
+ operating-points-v2 = <&cluster1_opp>;
+ cooling-min-level = <4>;
+ cooling-max-level = <0>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <550>;
+ sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>;
+ };
+
+ cpu5: cpu@101 {
+ compatible = "arm,cortex-a73", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&A73_L2>;
+ cpu-idle-states = <
+ &CPU_NAP
+ &CPU_SLEEP
+ &CLUSTER_SLEEP_1
+ >;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER1>;
+ operating-points-v2 = <&cluster1_opp>;
+ sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>;
+ };
+
+ cpu6: cpu@102 {
+ compatible = "arm,cortex-a73", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ next-level-cache = <&A73_L2>;
+ cpu-idle-states = <
+ &CPU_NAP
+ &CPU_SLEEP
+ &CLUSTER_SLEEP_1
+ >;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER1>;
+ operating-points-v2 = <&cluster1_opp>;
+ sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>;
+ };
+
+ cpu7: cpu@103 {
+ compatible = "arm,cortex-a73", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ next-level-cache = <&A73_L2>;
+ cpu-idle-states = <
+ &CPU_NAP
+ &CPU_SLEEP
+ &CLUSTER_SLEEP_1
+ >;
+ clocks = <&stub_clock HI3660_CLK_STUB_CLUSTER1>;
+ operating-points-v2 = <&cluster1_opp>;
+ sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_NAP: cpu-nap {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0000001>;
+ entry-latency-us = <7>;
+ exit-latency-us = <2>;
+ min-residency-us = <15>;
+ };
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <70>;
+ min-residency-us = <3000>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <5000>;
+ min-residency-us = <20000>;
+ };
+
+ CLUSTER_SLEEP_1: cluster-sleep-1 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <1000>;
+ exit-latency-us = <5000>;
+ min-residency-us = <20000>;
+ };
+ };
+
+ A53_L2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ A73_L2: l2-cache1 {
+ compatible = "cache";
+ };
+
+ /include/ "hi3660-sched-energy.dtsi"
+ };
+
+ cluster0_opp: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <533000000>;
+ opp-microvolt = <700000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp01 {
+ opp-hz = /bits/ 64 <999000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp02 {
+ opp-hz = /bits/ 64 <1402000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp03 {
+ opp-hz = /bits/ 64 <1709000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp04 {
+ opp-hz = /bits/ 64 <1844000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <300000>;
+ };
+ };
+
+ cluster1_opp: opp_table1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp10 {
+ opp-hz = /bits/ 64 <903000000>;
+ opp-microvolt = <700000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp11 {
+ opp-hz = /bits/ 64 <1421000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp12 {
+ opp-hz = /bits/ 64 <1805000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp13 {
+ opp-hz = /bits/ 64 <2112000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <300000>;
+ };
+
+ opp14 {
+ opp-hz = /bits/ 64 <2362000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <300000>;
+ };
+ };
+
+ gic: interrupt-controller@e82b0000 {
+ compatible = "arm,gic-400";
+ reg = <0x0 0xe82b1000 0 0x1000>, /* GICD */
+ <0x0 0xe82b2000 0 0x2000>, /* GICC */
+ <0x0 0xe82b4000 0 0x2000>, /* GICH */
+ <0x0 0xe82b6000 0 0x2000>; /* GICV */
+ #address-cells = <0>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>,
+ <&cpu4>,
+ <&cpu5>,
+ <&cpu6>,
+ <&cpu7>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <1920000>;
+ };
+
+ ddr_devfreq {
+ compatible = "hisilicon,hi3660-ddrfreq";
+ clocks = <&stub_clock HI3660_CLK_STUB_DDR_VOTE>,
+ <&stub_clock HI3660_CLK_STUB_DDR>,
+ <&stub_clock HI3660_CLK_STUB_DDR_LIMIT>;
+ operating-points = <
+ /* kHz uV */
+ 400000 0
+ 685000 0
+ 1067000 0
+ 1244000 0
+ 1866000 0
+ >;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ crg_ctrl: crg_ctrl@fff35000 {
+ compatible = "hisilicon,hi3660-crgctrl", "syscon";
+ reg = <0x0 0xfff35000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ crg_rst: crg_rst_controller {
+ compatible = "hisilicon,hi3660-reset";
+ #reset-cells = <2>;
+ hisi,rst-syscon = <&crg_ctrl>;
+ };
+
+
+ pctrl: pctrl@e8a09000 {
+ compatible = "hisilicon,hi3660-pctrl", "syscon";
+ reg = <0x0 0xe8a09000 0x0 0x2000>;
+ #clock-cells = <1>;
+ };
+
+ pmctrl: pmctrl@fff31000 {
+ compatible = "hisilicon,hi3660-pmctrl", "syscon";
+ reg = <0x0 0xfff31000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ pmuctrl: crg_ctrl@fff34000 {
+ compatible = "hisilicon,hi3660-pmuctrl", "syscon";
+ reg = <0x0 0xfff34000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ sctrl: sctrl@fff0a000 {
+ compatible = "hisilicon,hi3660-sctrl", "syscon";
+ reg = <0x0 0xfff0a000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ reboot {
+ compatible = "hisilicon,hi3660-reboot";
+ pmu-regmap = <&pmuctrl>;
+ sctrl-regmap = <&sctrl>;
+ reboot-offset = <0x4>;
+ };
+
+ iomcu: iomcu@ffd7e000 {
+ compatible = "hisilicon,hi3660-iomcu", "syscon";
+ reg = <0x0 0xffd7e000 0x0 0x1000>;
+ #clock-cells = <1>;
+
+ };
+
+ iomcu_rst: reset {
+ compatible = "hisilicon,hi3660-reset";
+ hisi,rst-syscon = <&iomcu>;
+ #reset-cells = <2>;
+ };
+
+ mailbox: mailbox@e896b000 {
+ compatible = "hisilicon,hi3660-mbox";
+ reg = <0x0 0xe896b000 0x0 0x1000>;
+ interrupts = <0x0 0xc0 0x4>,
+ <0x0 0xc1 0x4>;
+ #mbox-cells = <3>;
+ };
+
+ stub_clock: stub_clock {
+ compatible = "hisilicon,hi3660-stub-clk";
+ reg = <0x0 0xe896b500 0x0 0x0100>;
+ #clock-cells = <1>;
+ mbox-names = "mbox-tx";
+ mboxes = <&mailbox 13 3 0>;
+ };
+
+ dual_timer0: timer@fff14000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xfff14000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_OSC32K>,
+ <&crg_ctrl HI3660_OSC32K>,
+ <&crg_ctrl HI3660_OSC32K>;
+ clock-names = "timer1", "timer2", "apb_pclk";
+ };
+
+ i2c0: i2c@ffd71000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xffd71000 0x0 0x1000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_I2C0>;
+ resets = <&iomcu_rst 0x20 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ffd72000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xffd72000 0x0 0x1000>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_I2C1>;
+ resets = <&iomcu_rst 0x20 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pmx_func &i2c1_cfg_func>;
+ status = "ok";
+
+ rt1711@4e {
+ compatible = "richtek,rt1711";
+ reg = <0x4e>;
+ status = "ok";
+ rt1711,irq_pin = <&gpio27 3 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_cfg_func>;
+ /* 0: dfp/ufp, 1: dfp, 2: ufp */
+ rt-dual,supported_modes = <0>;
+ /* tcpc_device's name */
+ rt-tcpc,name = "type_c_port0";
+ /* 0: SNK Only, 1: SRC Only, 2: DRP, 3: Try.SRC, 4: Try.SNK */
+ rt-tcpc,role_def = <2>;
+ /* 0: Default, 1: 1.5, 2: 3.0 */
+ rt-tcpc,rp_level = <0>;
+ /* the number of notifier supply */
+ rt-tcpc,notifier_supply_num = <0>;
+ pd-data {
+ pd,source-pdo-size = <1>;
+ /*<0x019014>;*/
+ pd,source-pdo-data = <0x00019064>;
+
+ pd,sink-pdo-size = <2>;
+ /* 0x0002d0c8 : 9V, 2A */
+ pd,sink-pdo-data = <0x000190c8 0x0002d0c8> ;
+
+ pd,id-vdo-size = <3>;
+ pd,id-vdo-data = <0xd00029cf 0x0 0x00010000>;
+ };
+ dpm_caps {
+ local_dr_power;
+ local_dr_data;
+ // local_ext_power;
+ local_usb_comm;
+ // local_usb_suspend;
+ // local_high_cap;
+ // local_give_back;
+ // local_no_suspend;
+ local_vconn_supply;
+
+ // attemp_enter_dp_mode;
+ attemp_discover_cable;
+ attemp_discover_id;
+
+ /* 0: disable, 1: prefer_snk, 2: prefer_src */
+ pr_check = <0>;
+ // pr_reject_as_source;
+ // pr_reject_as_sink;
+ pr_check_gp_source;
+ // pr_check_gp_sink;
+
+ /* 0: disable, 1: prefer_ufp, 2: prefer_dfp */
+ dr_check = <0>;
+ // dr_reject_as_dfp;
+ // dr_reject_as_ufp;
+
+ snk_prefer_low_voltage;
+ snk_ignore_mismatch_current;
+ };
+ };
+
+ adv7533: adv7533@39 {
+ status = "ok";
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ v1p2-supply = <&ldo3>;
+ vdd-supply = <&ldo3>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 2>;
+ pd-gpio = <&gpio5 1 0>;
+ sel-gpio = <&gpio2 4 0>;
+ adi,dsi-lanes = <4>;
+ adi,disable-timing-generator;
+ #sound-dai-cells = <0>;
+
+ port {
+ adv7533_in: endpoint {
+ remote-endpoint = <&dsi_out0>;
+ };
+ };
+ };
+ };
+
+ pd_dpm {
+ compatible = "hisilicon,pd_dpm";
+ tcp_name = "type_c_port0";
+ status = "ok";
+ };
+
+ hubv2: gpio_hubv2 {
+ compatible = "hisilicon,gpio_hubv2";
+ typc_vbus_int_gpio,typec-gpios = <&gpio25 2 0>;
+ typc_vbus_enable_val = <1>;
+ otg_gpio = <&gpio25 6 0>;
+ hub_vdd12_en_gpio = <&gpio2 1 0>;
+ hub_vdd33_en_gpio = <&gpio5 6 0>;
+ hub_reset_en_gpio = <&gpio4 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbhub5734_pmx_func>;
+ };
+
+ i2c3: i2c@fdf0c000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfdf0c000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_I2C3>;
+ resets = <&crg_rst 0x78 7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pmx_func &i2c3_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@fdf0b000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfdf0b000 0x0 0x1000>;
+ interrupts = <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_I2C7>;
+ resets = <&crg_rst 0x60 14>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_pmx_func &i2c7_cfg_func>;
+ status = "disabled";
+ };
+
+ uart0: serial@fdf02000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf02000 0x0 0x1000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_MUX_UART0>,
+ <&crg_ctrl HI3660_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pmx_func &uart0_cfg_func>;
+ status = "disabled";
+ };
+
+ uart1: serial@fdf00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf00000 0x0 0x1000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UART1>,
+ <&crg_ctrl HI3660_CLK_GATE_UART1>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pmx_func &uart1_cfg_func>;
+ status = "disabled";
+ };
+
+ uart2: serial@fdf03000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf03000 0x0 0x1000>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UART2>,
+ <&crg_ctrl HI3660_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pmx_func &uart2_cfg_func>;
+ status = "disabled";
+ };
+
+ uart3: serial@ffd74000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xffd74000 0x0 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_FACTOR_UART3>,
+ <&crg_ctrl HI3660_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pmx_func &uart3_cfg_func>;
+ status = "disabled";
+ };
+
+ uart4: serial@fdf01000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf01000 0x0 0x1000>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UART4>,
+ <&crg_ctrl HI3660_CLK_GATE_UART4>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pmx_func &uart4_cfg_func>;
+ status = "disabled";
+ };
+
+ uart5: serial@fdf05000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf05000 0x0 0x1000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UART5>,
+ <&crg_ctrl HI3660_CLK_GATE_UART5>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5_pmx_func &uart5_cfg_func>;
+ status = "disabled";
+ };
+
+ uart6: serial@fff32000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfff32000 0x0 0x1000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_UART6>,
+ <&crg_ctrl HI3660_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart6_pmx_func &uart6_cfg_func>;
+ status = "disabled";
+ };
+
+ rtc0: rtc@fff04000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x0 0xfff04000 0x0 0x1000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_PCLK>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio0: gpio@e8a0b000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a0b000 0 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 1 0 7>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO0>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio1: gpio@e8a0c000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a0c000 0 0x1000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 1 7 7>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO1>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio2: gpio@e8a0d000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a0d000 0 0x1000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 14 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO2>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio3: gpio@e8a0e000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a0e000 0 0x1000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 22 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO3>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio4: gpio@e8a0f000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a0f000 0 0x1000>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 30 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO4>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio5: gpio@e8a10000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a10000 0 0x1000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 38 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO5>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio6: gpio@e8a11000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a11000 0 0x1000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 46 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO6>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio7: gpio@e8a12000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a12000 0 0x1000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 54 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO7>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio8: gpio@e8a13000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a13000 0 0x1000>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 62 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO8>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio9: gpio@e8a14000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a14000 0 0x1000>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 70 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO9>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio10: gpio@e8a15000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a15000 0 0x1000>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 78 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO10>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio11: gpio@e8a16000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a16000 0 0x1000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 86 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO11>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio12: gpio@e8a17000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a17000 0 0x1000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 94 3 &pmx0 7 101 1>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO12>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio13: gpio@e8a18000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a18000 0 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 102 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO13>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio14: gpio@e8a19000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a19000 0 0x1000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 110 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO14>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio15: gpio@e8a1a000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a1a000 0 0x1000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 118 6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO15>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio16: gpio@e8a1b000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a1b000 0 0x1000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO16>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio17: gpio@e8a1c000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a1c000 0 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO17>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio18: gpio@ff3b4000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xff3b4000 0 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx2 0 0 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO18>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio19: gpio@ff3b5000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xff3b5000 0 0x1000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx2 0 8 4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO19>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio20: gpio@e8a1f000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a1f000 0 0x1000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx1 0 0 6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO20>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio21: gpio@e8a20000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xe8a20000 0 0x1000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&pmx3 0 0 6>;
+ clocks = <&crg_ctrl HI3660_PCLK_GPIO21>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio22: gpio@fff0b000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff0b000 0 0x1000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO176 */
+ gpio-ranges = <&pmx4 2 0 6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO0>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio23: gpio@fff0c000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff0c000 0 0x1000>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO184 */
+ gpio-ranges = <&pmx4 0 6 7>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO1>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio24: gpio@fff0d000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff0d000 0 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO192 */
+ gpio-ranges = <&pmx4 0 13 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO2>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio25: gpio@fff0e000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff0e000 0 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO200 */
+ gpio-ranges = <&pmx4 0 21 4 &pmx4 5 25 3>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO3>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio26: gpio@fff0f000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff0f000 0 0x1000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO208 */
+ gpio-ranges = <&pmx4 0 28 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO4>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio27: gpio@fff10000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff10000 0 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* GPIO216 */
+ gpio-ranges = <&pmx4 0 36 6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO5>;
+ clock-names = "apb_pclk";
+ };
+
+ gpio28: gpio@fff1d000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0 0xfff1d000 0 0x1000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&sctrl HI3660_PCLK_AO_GPIO6>;
+ clock-names = "apb_pclk";
+ };
+
+ spi2: spi@ffd68000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x0 0xffd68000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_SPI2>;
+ clock-names = "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pmx_func>;
+ num-cs = <1>;
+ cs-gpios = <&gpio27 2 0>;
+ status = "disabled";
+ };
+
+ spi3: spi@ff3b3000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x0 0xff3b3000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_SPI3>;
+ clock-names = "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3_pmx_func>;
+ num-cs = <1>;
+ cs-gpios = <&gpio18 5 0>;
+ status = "disabled";
+ };
+
+ pcie@f4000000 {
+ compatible = "hisilicon,kirin-pcie";
+ reg = <0x0 0xf4000000 0x0 0x1000>,
+ <0x0 0xff3fe000 0x0 0x1000>,
+ <0x0 0xf3f20000 0x0 0x40000>,
+ <0x0 0xf5000000 0x0 0x2000>;
+ reg-names = "dbi", "apb", "phy", "config";
+ bus-range = <0x0 0x1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x02000000 0x0 0x00000000
+ 0x0 0xf6000000
+ 0x0 0x02000000>;
+ num-lanes = <1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <0x0 0 0 1
+ &gic GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <0x0 0 0 2
+ &gic GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <0x0 0 0 3
+ &gic GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <0x0 0 0 4
+ &gic GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_PCIEPHY_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_PCIEAUX>,
+ <&crg_ctrl HI3660_PCLK_GATE_PCIE_PHY>,
+ <&crg_ctrl HI3660_PCLK_GATE_PCIE_SYS>,
+ <&crg_ctrl HI3660_ACLK_GATE_PCIE>;
+ clock-names = "pcie_phy_ref", "pcie_aux",
+ "pcie_apb_phy", "pcie_apb_sys",
+ "pcie_aclk";
+ reset-gpio = <&gpio11 1 0 >;
+ };
+
+ /* SD */
+ dwmmc1: dwmmc1@ff37f000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cd-inverted;
+ compatible = "hisilicon,hi3660-dw-mshc";
+ num-slots = <1>;
+ bus-width = <0x4>;
+ disable-wp;
+ cap-sd-highspeed;
+ supports-highspeed;
+ card-detect-delay = <200>;
+ reg = <0x0 0xff37f000 0x0 0x1000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_SD>,
+ <&crg_ctrl HI3660_HCLK_GATE_SD>;
+ clock-names = "ciu", "biu";
+ clock-frequency = <3200000>;
+ resets = <&crg_rst 0x94 18>;
+ reset-names = "reset";
+ cd-gpios = <&gpio25 3 0>;
+ hisilicon,peripheral-syscon = <&sctrl>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_pmx_func
+ &sd_clk_cfg_func
+ &sd_cfg_func>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "disabled";
+
+ slot@0 {
+ reg = <0x0>;
+ bus-width = <4>;
+ disable-wp;
+ };
+ };
+
+ /* SDIO */
+ dwmmc2: dwmmc2@ff3ff000 {
+ compatible = "hisilicon,hi3660-dw-mshc";
+ reg = <0x0 0xff3ff000 0x0 0x1000>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ num-slots = <1>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_SDIO0>,
+ <&crg_ctrl HI3660_HCLK_GATE_SDIO0>;
+ clock-names = "ciu", "biu";
+ resets = <&crg_rst 0x94 20>;
+ reset-names = "reset";
+ card-detect-delay = <200>;
+ supports-highspeed;
+ keep-power-in-suspend;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio_pmx_func
+ &sdio_clk_cfg_func
+ &sdio_cfg_func>;
+ status = "disabled";
+ };
+
+ tsensor: tsensor {
+ compatible = "hisilicon,hi3660-thermal";
+ reg = <0x0 0xfff30000 0x0 0x1000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ ufs: ufs@ff3b0000 {
+ compatible = "jedec,ufs-1.1", "hisilicon,hi3660-ufs";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "clk_ref", "clk_phy";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ /* offset: 0x84; bit: 7 */
+ resets = <&crg_rst 0x84 12>,
+ <&crg_rst 0x84 7>;
+ reset-names = "rst", "assert";
+ };
+
+
+ hub5734_gpio: hub5734_gpio {
+ compatible = "hub5734_gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbhub5734_pmx_func>;
+ };
+
+ hisi_usb@ff200000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "hisilicon,hi3660-dwc3";
+ reg = <0x0 0xff200000 0x0 0x1000 0x0 0xff100000 0x0 0x100000>;
+ ranges;
+ bc_again_flag = <0>;
+
+ clocks = <&crg_ctrl HI3660_CLK_ABB_USB>,
+ <&crg_ctrl HI3660_ACLK_GATE_USB3OTG>;
+ clock-names = "clk_usb3phy_ref", "aclk_usb3otg";
+ eye_diagram_param = <0x1c466e3>;
+ eye_diagram_host_param = <0x1c466e3>;
+ usb3_phy_cr_param = <0xb80>;
+ usb3_phy_host_cr_param = <0x980>;
+ usb3_phy_tx_vboost_lvl = <0x5>;
+
+ dwc3@ff100000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xff100000 0x0 0x100000>;
+ interrupts = <0 159 4>, <0 161 4>;
+ dr_mode = "otg";
+ maximum-speed = "super-speed";
+ };
+ };
+
+ watchdog0: watchdog@e8a06000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xe8a06000 0x0 0x1000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_OSC32K>;
+ clock-names = "apb_pclk";
+ };
+
+ watchdog1: watchdog@e8a07000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xe8a07000 0x0 0x1000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_OSC32K>;
+ clock-names = "apb_pclk";
+ };
+
+ thermal-zones {
+
+ cls0: cls0 {
+ polling-delay = <1000>;
+ polling-delay-passive = <25>;
+ sustainable-power = <4000>;
+
+ /* sensor ID */
+ thermal-sensors = <&tsensor 4>;
+
+ trips {
+ threshold: trip-point@0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ target: trip-point@1 {
+ temperature = <75000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&target>;
+ contribution = <1024>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&target>;
+ contribution = <512>;
+ cooling-device = <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map2 {
+ trip = <&target>;
+ contribution = <1024>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ i2s2: hisi_i2s {
+ compatible = "hisilicon,hisi-i2s";
+ reg = <0x0 0xe804f800 0x0 0x400>,
+ <0x0 0xe804e000 0x0 0x400>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2_pmx_func &i2s2_cfg_func>;
+ dmas = <&asp_dmac 18 &asp_dmac 19>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ };
+
+ asp_dmac: asp_dmac@e804b000 {
+ compatible = "hisilicon,hisi-pcm-asp-dma";
+ reg = <0x0 0xe804b000 0x0 0x1000>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ dma-requests = <32>;
+ dma-min-chan = <0>;
+ dma-used-chans = <0xFFFE>;
+ dma-share;
+ interrupts = <0 216 4>;
+ interrupt-names = "asp_dma_irq";
+ status = "ok";
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "hikey-hdmi";
+ simple-audio-card,format = "i2s";
+
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+
+ sound_master: simple-audio-card,cpu {
+ sound-dai = <&i2s2>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&adv7533>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index dba3c131c62c..1e3c989c2ba3 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -19,12 +19,141 @@
serial1 = &uart1; /* BT UART */
serial2 = &uart2; /* LS Expansion UART0 */
serial3 = &uart3; /* LS Expansion UART1 */
+ spi0 = &spi0;
};
chosen {
stdout-path = "serial3:115200n8";
};
+ fiq-debugger {
+ compatible = "android,irq-hi6220-uart";
+ reg = <0x0 0xf7113000 0x0 0x1000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "fiq", "signal";
+ };
+
+ uart_overlay@0 {
+ fragment@0 {
+ target-path="/soc/uart@f7113000";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+
+ overlay_mgr {
+ compatible = "linux,overlay_manager";
+ hardware_cfg_innolux_panel {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/dsi@f4107800";
+ __overlay__ { /* For the panel node, the reg value should be >= 1 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* 1 for output port */
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ dsi_out1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&panel0_in>;
+ };
+ };
+ };
+ panel@1 {
+ compatible = "innolux,n070icn-pb1";
+ reg = <1>;
+ power-on-delay= <50>;
+ reset-delay = <100>;
+ init-delay = <100>;
+ panel-width-mm = <58>;
+ panel-height-mm = <103>;
+ pwr-en-gpio = <&gpio2 1 0>;
+ bl-en-gpio = <&gpio2 3 0>;
+ pwm-gpio = <&gpio12 7 0>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi_out1>;
+ };
+ };
+ };
+ };
+ };
+ };
+ };
+ hardware_cfg_spidev0 {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@f7106000/spidev@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_spidev_dma0 {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@f7106000/spidev_dma@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_neonkey {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@f7106000/sensorhub@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_argonkey {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/spi@f7106000/argonkey@0";
+ __overlay__ {
+ status = "ok";
+ };
+ };
+ };
+ };
+ hardware_cfg_disable_bt {
+ overlay_0 {
+ fragment@0 {
+ target-path="/soc/uart@f7111000/bluetooth";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+ };
+ hardware_cfg_cs_sd_qb {
+ overlay_0 {
+ fragment@0 { /* Select SD from Quick Bus */
+ target-path="/soc/gpio@f7020000";
+ __overlay__ {
+ sd_sel {
+ gpio-hog;
+ gpios = <4 0>;
+ output-high;
+ line-name = "gpio_sd_sel";
+ };
+ };
+ };
+ };
+ };
+ };
+
/*
* Reserve below regions from memory node:
*
@@ -98,6 +227,11 @@
assigned-clocks = <&sys_ctrl HI6220_UART1_SRC>;
assigned-clock-rates = <150000000>;
status = "ok";
+
+ bluetooth {
+ compatible = "ti,wl1835-st";
+ enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ };
};
uart2: uart@f7112000 {
@@ -155,7 +289,7 @@
};
gpio3: gpio@f8014000 {
- gpio-line-names = "GPIO3_0", "NC", "NC", "", "NC", "",
+ gpio-line-names = "GPIO3_0", "NC", "NC", "", "NC",
"WLAN_ACTIVE", "NC", "NC";
};
@@ -192,7 +326,7 @@
gpio8: gpio@f7024000 {
gpio-line-names = "NC", "[CEC_CLK_19_2MHZ]", "NC",
- "", "", "", "", "", "";
+ "", "", "", "", "";
};
gpio9: gpio@f7025000 {
@@ -322,6 +456,27 @@
};
};
+ kim {
+ compatible = "kim";
+ pinctrl-names = "default";
+ pinctrl-0 = <>; /* FIXME: add BT PCM pinctrl here */
+ /*
+ * FIXME: The following is complete CRAP since
+ * the vendor driver doesn't follow the gpio
+ * binding. Passing in a magic Linux gpio number
+ * here until we fix the vendor driver.
+ */
+ /* BT_EN: BT_REG_ON_GPIO1_7 */
+ nshutdown_gpio = <503>;
+ dev_name = "/dev/ttyAMA1";
+ flow_cntrl = <1>;
+ baud_rate = <3000000>;
+ };
+
+ btwilink {
+ compatible = "btwilink";
+ };
+
pmic: pmic@f8000000 {
compatible = "hisilicon,hi655x-pmic";
reg = <0x0 0xf8000000 0x0 0x1000>;
@@ -406,6 +561,30 @@
};
};
};
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "hikey-hdmi";
+ simple-audio-card,format = "i2s";
+
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+
+ sound_master: simple-audio-card,cpu {
+ sound-dai = <&i2s0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&adv7533>;
+ };
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
};
&uart2 {
@@ -446,6 +625,7 @@
interrupts = <1 2>;
pd-gpio = <&gpio0 4 0>;
adi,dsi-lanes = <4>;
+ #sound-dai-cells = <0>;
port {
adv7533_in: endpoint {
@@ -453,4 +633,95 @@
};
};
};
+ };
+
+&spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "ok";
+
+ /* the two spidev nodes below cannot both be enabled at the same time */
+ spidev@0 {
+ compatible = "rohm,dh2228fv";
+ spi-max-frequency = <500000>;
+ reg = <0>;
+ status = "disabled";
+ };
+
+ /*
+ * To enable DMA on a slave of spi0, add the lines
+ * pl022,com-mode = <2>;
+ * pl022,rx-level-trig = <1>;
+ * to the DT node of the slave, as done in the spidev_dma@0 node below.
+ */
+ spidev_dma@0 {
+ compatible = "rohm,dh2228fv";
+ spi-max-frequency = <500000>;
+ reg = <0>;
+ pl022,com-mode = <2>;
+ pl022,rx-level-trig = <1>;
+ status = "disabled";
+ };
+
+ sensorhub@0 {
+ compatible = "nanohub";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+ spi-cpol;
+ spi-cpha;
+
+ sensorhub,nreset-gpio = <&gpio0 6 0>;
+ sensorhub,boot0-gpio = <&gpio2 2 0>; /* Fake */
+ sensorhub,wakeup-gpio = <&gpio2 0 0>; /* Gpio2_0 -> PB9 */
+ sensorhub,irq1-gpio = <&gpio2 1 0>; /* Gpio2_1 -> PB5 */
+ sensorhub,spi-cs-gpio = <&gpio6 2 0>;
+ sensorhub,bl-addr = <0x08000000>;
+ sensorhub,kernel-addr = <0x0800C000>;
+ sensorhub,num-flash-banks = <4>;
+ sensorhub,flash-banks = <0 0x08000000 0x04000>,
+ <3 0x0800C000 0x04000>,
+ <4 0x08010000 0x10000>,
+ <5 0x08020000 0x20000>;
+ sensorhub,shared-addr = <0x08040000>;
+ sensorhub,num-shared-flash-banks = <2>;
+ sensorhub,shared-flash-banks = <6 0x08040000 0x20000>,
+ <7 0x08060000 0x20000>;
+ status = "disabled";
+ };
+
+ argonkey@0 {
+ compatible = "nanohub";
+ reg = <0>;
+ pl022,com-mode = <2>;
+ pl022,rx-level-trig = <1>;
+ spi-max-frequency = <7500000>;
+
+ interrupt-parent = <&gpio2>;
+ interrupts = <0 0x2>;
+ sensorhub,irq1-gpio = <&gpio2 0 0>;
+ sensorhub,irq2-gpio = <&gpio10 2 0>;
+ sensorhub,wakeup-gpio = <&gpio9 1 0>;
+ sensorhub,spi-cs-gpio = <&gpio6 7 0>;
+ sensorhub,nreset-gpio = <&gpio2 3 0>;
+ sensorhub,boot0-gpio = <&gpio2 7 0>;
+ sensorhub,bl-addr = <0x08000000>;
+ sensorhub,kernel-addr = <0x0800C000>;
+ sensorhub,num-flash-banks = <4>;
+ sensorhub,flash-banks =
+ <0 0x08000000 0x04000>,
+ <3 0x0800C000 0x04000>,
+ <4 0x08010000 0x10000>,
+ <5 0x08020000 0x20000>;
+ sensorhub,shared-addr = <0x08040000>;
+ sensorhub,num-shared-flash-banks = <6>;
+ sensorhub,shared-flash-banks =
+ <6 0x08040000 0x20000>,
+ <7 0x08060000 0x20000>,
+ <8 0x08080000 0x20000>,
+ <9 0x080A0000 0x20000>,
+ <10 0x080C0000 0x20000>,
+ <11 0x080E0000 0x20000>;
+
+ status = "disabled";
+ };
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-sched-energy.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220-sched-energy.dtsi
new file mode 100644
index 000000000000..6dfc49332b4f
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-sched-energy.dtsi
@@ -0,0 +1,69 @@
+/*
+ * HiKey-specific energy cost model data.
+ */
+
+/* static struct idle_state idle_states_cluster_a53[] = { */
+/* { .power = 47 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/* { .power = 47 }, /\* WFI *\/ */
+/* { .power = 47 }, /\* cpu-sleep-0 *\/ */
+/* { .power = 0 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_cluster_a53[] = { */
+/* /\* Power per cluster *\/ */
+/* { .cap = 178, .power = 16, }, /\* 200 MHz *\/ */
+/* { .cap = 369, .power = 29, }, /\* 432 MHz *\/ */
+/* { .cap = 622, .power = 47, }, /\* 729 MHz *\/ */
+/* { .cap = 819, .power = 75, }, /\* 960 MHz *\/ */
+/* { .cap = 1024, .power = 112, }, /\* 1200 MHz *\/ */
+/* }; */
+
+/* static struct idle_state idle_states_core_a53[] = { */
+/* { .power = 15 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/* { .power = 15 }, /\* WFI *\/ */
+/* { .power = 0 }, /\* cpu-sleep-0 *\/ */
+/* { .power = 0 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_core_a53[] = { */
+/* /\* Power per cpu *\/ */
+/* { .cap = 178, .power = 69, }, /\* 200 MHz *\/ */
+/* { .cap = 369, .power = 124, }, /\* 432 MHz *\/ */
+/* { .cap = 622, .power = 224, }, /\* 729 MHz *\/ */
+/* { .cap = 819, .power = 367, }, /\* 960 MHz *\/ */
+/* { .cap = 1024, .power = 670, }, /\* 1200 MHz *\/ */
+/* }; */
+
+energy-costs {
+ CPU_COST: core-cost {
+ busy-cost-data = <
+ 178 69
+ 369 124
+ 622 224
+ 819 367
+ 1024 670
+ >;
+ idle-cost-data = <
+ 15
+ 15
+ 0
+ 0
+ >;
+ };
+
+ CLUSTER_COST: cluster-cost {
+ busy-cost-data = <
+ 178 16
+ 369 29
+ 622 47
+ 819 75
+ 1024 112
+ >;
+ idle-cost-data = <
+ 47
+ 47
+ 47
+ 0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 509a2eda2ce4..9704f2e961f9 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -91,6 +91,7 @@
cooling-max-level = <0>;
#cooling-cells = <2>; /* min followed by max */
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
dynamic-power-coefficient = <311>;
};
@@ -102,6 +103,7 @@
next-level-cache = <&CLUSTER0_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu2: cpu@2 {
@@ -112,6 +114,7 @@
next-level-cache = <&CLUSTER0_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu3: cpu@3 {
@@ -122,6 +125,7 @@
next-level-cache = <&CLUSTER0_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu4: cpu@100 {
@@ -132,6 +136,7 @@
next-level-cache = <&CLUSTER1_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu5: cpu@101 {
@@ -142,6 +147,7 @@
next-level-cache = <&CLUSTER1_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu6: cpu@102 {
@@ -152,6 +158,7 @@
next-level-cache = <&CLUSTER1_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
cpu7: cpu@103 {
@@ -162,6 +169,7 @@
next-level-cache = <&CLUSTER1_L2>;
operating-points-v2 = <&cpu_opp_table>;
cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ sched-energy-costs = <&CPU_COST &CLUSTER_COST>;
};
CLUSTER0_L2: l2-cache0 {
@@ -171,6 +179,8 @@
CLUSTER1_L2: l2-cache1 {
compatible = "cache";
};
+
+ /include/ "hi6220-sched-energy.dtsi"
};
cpu_opp_table: cpu_opp_table {
@@ -332,6 +342,19 @@
status = "disabled";
};
+ dma0: dma@f7370000 {
+ compatible = "hisilicon,k3-dma-1.0";
+ reg = <0x0 0xf7370000 0x0 0x1000>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ dma-requests = <32>;
+ interrupts = <0 84 4>;
+ clocks = <&sys_ctrl HI6220_EDMAC_ACLK>;
+ dma-no-cci;
+ dma-type = "hi6220_dma";
+ status = "ok";
+ };
+
dual_timer0: timer@f8008000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x0 0xf8008000 0x0 0x1000>;
@@ -679,7 +702,8 @@
reg = <0x0 0xf7106000 0x0 0x1000>;
interrupts = <0 50 4>;
bus-id = <0>;
- enable-dma = <0>;
+ dmas = <&dma0 0 &dma0 1>;
+ dma-names = "rx", "tx";
clocks = <&sys_ctrl HI6220_SPI_CLK>;
clock-names = "apb_pclk";
pinctrl-names = "default";
@@ -732,6 +756,16 @@
regulator-always-on;
};
+ usb_vbus: usb-vbus {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&gpio2 6 1>;
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&gpio2 5 1>;
+ };
+
usb_phy: usbphy {
compatible = "hisilicon,hi6220-usb-phy";
#phy-cells = <0>;
@@ -745,12 +779,14 @@
phys = <&usb_phy>;
phy-names = "usb2-phy";
clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
+ extcon = <&usb_vbus>, <&usb_id>;
clock-names = "otg";
dr_mode = "otg";
g-use-dma;
g-rx-fifo-size = <512>;
g-np-tx-fifo-size = <128>;
- g-tx-fifo-size = <128 128 128 128 128 128>;
+ g-tx-fifo-size = <128 128 128 128 128 128 128 128
+ 16 16 16 16 16 16 16>;
interrupts = <0 77 0x4>;
};
@@ -823,6 +859,14 @@
pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
};
+ watchdog0: watchdog@f8005000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xf8005000 0x0 0x1000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ao_ctrl HI6220_WDT0_PCLK>;
+ clock-names = "apb_pclk";
+ };
+
tsensor: tsensor@0,f7030700 {
compatible = "hisilicon,tsensor";
reg = <0x0 0xf7030700 0x0 0x1000>;
@@ -832,6 +876,19 @@
#thermal-sensor-cells = <1>;
};
+ i2s0: hi6210_i2s {
+ compatible = "hisilicon,hi6210-i2s";
+ reg = <0x0 0xf7118000 0x0 0x8000>; /* i2s unit */
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* 155 "DigACodec_intr"-32 */
+ clocks = <&sys_ctrl HI6220_DACODEC_PCLK>,
+ <&sys_ctrl HI6220_BBPPLL0_DIV>;
+ clock-names = "dacodec", "i2s-base";
+ dmas = <&dma0 15 &dma0 14>;
+ dma-names = "rx", "tx";
+ hisilicon,sysctrl-syscon = <&sys_ctrl>;
+ #sound-dai-cells = <0>;
+ };
+
thermal-zones {
cls0: cls0 {
@@ -914,5 +971,43 @@
};
};
};
+
+ mali: mali@f4080000 {
+ compatible = "arm,mali-450", "arm,mali-utgard";
+ reg = <0x0 0x3f100000 0x0 0x00708000>;
+ clocks = <&media_ctrl HI6220_G3D_CLK>,
+ <&media_ctrl HI6220_G3D_PCLK>;
+ clock-names = "clk_g3d", "pclk_g3d";
+ mali_def_freq = <500>;
+ pclk_freq = <144>;
+ dfs_steps = <2>;
+ dfs_lockprf = <1>;
+ dfs_limit_max_prf = <1>;
+ dfs_profile_num = <2>;
+ dfs_profiles = <250 3 0>, <500 1 0>;
+ mali_type = <2>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <1 126 4>, /*gp*/
+ <1 126 4>, /*gp mmu*/
+ <1 126 4>, /*pp bc*/
+ <1 126 4>, /*pmu*/
+ <1 126 4>, /*pp0*/
+ <1 126 4>,
+ <1 126 4>, /*pp1*/
+ <1 126 4>,
+ <1 126 4>, /*pp2*/
+ <1 126 4>,
+ <1 126 4>, /*pp4*/
+ <1 126 4>,
+ <1 126 4>, /*pp5*/
+ <1 126 4>,
+ <1 126 4>, /*pp6*/
+ <1 126 4>;
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP", "IRQPMU",
+ "IRQPP0", "IRQPPMMU0", "IRQPP1", "IRQPPMMU1",
+ "IRQPP2", "IRQPPMMU2","IRQPP4", "IRQPPMMU4",
+ "IRQPP5", "IRQPPMMU5", "IRQPP6", "IRQPPMMU6";
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
new file mode 100644
index 000000000000..7e542d28dadb
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
@@ -0,0 +1,1059 @@
+/*
+ * pinctrl dts file for Hisilicon HiKey960 development board
+ *
+ */
+
+#include <dt-bindings/pinctrl/hisi.h>
+
+/ {
+ soc {
+ /* [IOMG_000, IOMG_123] */
+ range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
+ };
+
+ pmx0: pinmux@e896c000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xe896c000 0x0 0x1f0>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <
+ &range 0 7 0
+ &range 8 116 0>;
+
+ pmu_pmx_func: pmu_pmx_func {
+ pinctrl-single,pins = <
+ 0x008 MUX_M1 /* PMU1_SSI */
+ 0x00c MUX_M1 /* PMU2_SSI */
+ 0x010 MUX_M1 /* PMU_CLKOUT */
+ 0x100 MUX_M1 /* PMU_HKADC_SSI */
+ >;
+ };
+
+ csi0_pwd_n_pmx_func: csi0_pwd_n_pmx_func {
+ pinctrl-single,pins = <
+ 0x044 MUX_M0 /* CSI0_PWD_N */
+ >;
+ };
+
+ csi1_pwd_n_pmx_func: csi1_pwd_n_pmx_func {
+ pinctrl-single,pins = <
+ 0x04c MUX_M0 /* CSI1_PWD_N */
+ >;
+ };
+
+ isp0_pmx_func: isp0_pmx_func {
+ pinctrl-single,pins = <
+ 0x058 MUX_M1 /* ISP_CLK0 */
+ 0x064 MUX_M1 /* ISP_SCL0 */
+ 0x068 MUX_M1 /* ISP_SDA0 */
+ >;
+ };
+
+ isp1_pmx_func: isp1_pmx_func {
+ pinctrl-single,pins = <
+ 0x05c MUX_M1 /* ISP_CLK1 */
+ 0x06c MUX_M1 /* ISP_SCL1 */
+ 0x070 MUX_M1 /* ISP_SDA1 */
+ >;
+ };
+
+ pwr_key_pmx_func: pwr_key_pmx_func {
+ pinctrl-single,pins = <
+ 0x080 MUX_M0 /* GPIO_034 */
+ >;
+ };
+
+ i2c3_pmx_func: i2c3_pmx_func {
+ pinctrl-single,pins = <
+ 0x02c MUX_M1 /* I2C3_SCL */
+ 0x030 MUX_M1 /* I2C3_SDA */
+ >;
+ };
+
+ i2c4_pmx_func: i2c4_pmx_func {
+ pinctrl-single,pins = <
+ 0x090 MUX_M1 /* I2C4_SCL */
+ 0x094 MUX_M1 /* I2C4_SDA */
+ >;
+ };
+
+ pcie_perstn_pmx_func: pcie_perstn_pmx_func {
+ pinctrl-single,pins = <
+ 0x15c MUX_M1 /* PCIE_PERST_N */
+ >;
+ };
+
+ usbhub5734_pmx_func: usbhub5734_pmx_func {
+ pinctrl-single,pins = <
+ 0x11c MUX_M0 /* GPIO_073 */
+ 0x120 MUX_M0 /* GPIO_074 */
+ >;
+ };
+
+ uart0_pmx_func: uart0_pmx_func {
+ pinctrl-single,pins = <
+ 0x0cc MUX_M2 /* UART0_RXD */
+ 0x0d0 MUX_M2 /* UART0_TXD */
+ >;
+ };
+
+ uart1_pmx_func: uart1_pmx_func {
+ pinctrl-single,pins = <
+ 0x0b0 MUX_M2 /* UART1_CTS_N */
+ 0x0b4 MUX_M2 /* UART1_RTS_N */
+ 0x0a8 MUX_M2 /* UART1_RXD */
+ 0x0ac MUX_M2 /* UART1_TXD */
+ >;
+ };
+
+ uart2_pmx_func: uart2_pmx_func {
+ pinctrl-single,pins = <
+ 0x0bc MUX_M2 /* UART2_CTS_N */
+ 0x0c0 MUX_M2 /* UART2_RTS_N */
+ 0x0c8 MUX_M2 /* UART2_RXD */
+ 0x0c4 MUX_M2 /* UART2_TXD */
+ >;
+ };
+
+ uart3_pmx_func: uart3_pmx_func {
+ pinctrl-single,pins = <
+ 0x0dc MUX_M1 /* UART3_CTS_N */
+ 0x0e0 MUX_M1 /* UART3_RTS_N */
+ 0x0e4 MUX_M1 /* UART3_RXD */
+ 0x0e8 MUX_M1 /* UART3_TXD */
+ >;
+ };
+
+ uart4_pmx_func: uart4_pmx_func {
+ pinctrl-single,pins = <
+ 0x0ec MUX_M1 /* UART4_CTS_N */
+ 0x0f0 MUX_M1 /* UART4_RTS_N */
+ 0x0f4 MUX_M1 /* UART4_RXD */
+ 0x0f8 MUX_M1 /* UART4_TXD */
+ >;
+ };
+
+ uart5_pmx_func: uart5_pmx_func {
+ pinctrl-single,pins = <
+ 0x0c4 MUX_M3 /* UART5_CTS_N */
+ 0x0c8 MUX_M3 /* UART5_RTS_N */
+ 0x0bc MUX_M3 /* UART5_RXD */
+ 0x0c0 MUX_M3 /* UART5_TXD */
+ >;
+ };
+
+ uart6_pmx_func: uart6_pmx_func {
+ pinctrl-single,pins = <
+ 0x0cc MUX_M1 /* UART6_CTS_N */
+ 0x0d0 MUX_M1 /* UART6_RTS_N */
+ 0x0d4 MUX_M1 /* UART6_RXD */
+ 0x0d8 MUX_M1 /* UART6_TXD */
+ >;
+ };
+
+ cam0_rst_pmx_func: cam0_rst_pmx_func {
+ pinctrl-single,pins = <
+ 0x0c8 MUX_M0 /* CAM0_RST */
+ >;
+ };
+
+ cam1_rst_pmx_func: cam1_rst_pmx_func {
+ pinctrl-single,pins = <
+ 0x124 MUX_M0 /* CAM1_RST */
+ >;
+ };
+ };
+
+ /* [IOMG_MMC0_000, IOMG_MMC0_005] */
+ pmx1: pinmux@ff37e000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff37e000 0x0 0x18>;
+ #gpio-range-cells = <0x3>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 6 0>;
+
+ sd_pmx_func: sd_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SD_CLK */
+ 0x004 MUX_M1 /* SD_CMD */
+ 0x008 MUX_M1 /* SD_DATA0 */
+ 0x00c MUX_M1 /* SD_DATA1 */
+ 0x010 MUX_M1 /* SD_DATA2 */
+ 0x014 MUX_M1 /* SD_DATA3 */
+ >;
+ };
+ };
+
+ /* [IOMG_FIX_000, IOMG_FIX_011] */
+ pmx2: pinmux@ff3b6000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff3b6000 0x0 0x30>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 12 0>;
+
+ ufs_pmx_func: ufs_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* UFS_REF_CLK */
+ 0x004 MUX_M1 /* UFS_RST_N */
+ >;
+ };
+
+ spi3_pmx_func: spi3_pmx_func {
+ pinctrl-single,pins = <
+ 0x008 MUX_M1 /* SPI3_CLK */
+ 0x00c MUX_M1 /* SPI3_DI */
+ 0x010 MUX_M1 /* SPI3_DO */
+ 0x014 MUX_M1 /* SPI3_CS0_N */
+ >;
+ };
+ };
+
+ /* [IOMG_MMC1_000, IOMG_MMC1_005] */
+ pmx3: pinmux@ff3fd000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff3fd000 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 6 0>;
+
+ sdio_pmx_func: sdio_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SDIO_CLK */
+ 0x004 MUX_M1 /* SDIO_CMD */
+ 0x008 MUX_M1 /* SDIO_DATA0 */
+ 0x00c MUX_M1 /* SDIO_DATA1 */
+ 0x010 MUX_M1 /* SDIO_DATA2 */
+ 0x014 MUX_M1 /* SDIO_DATA3 */
+ >;
+ };
+ };
+
+ /* [IOMG_AO_000, IOMG_AO_041] */
+ pmx4: pinmux@fff11000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xfff11000 0x0 0xa8>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base in node, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 42 0>;
+
+ i2s2_pmx_func: i2s2_pmx_func {
+ pinctrl-single,pins = <
+ 0x044 MUX_M1 /* I2S2_DI */
+ 0x048 MUX_M1 /* I2S2_DO */
+ 0x04c MUX_M1 /* I2S2_XCLK */
+ 0x050 MUX_M1 /* I2S2_XFS */
+ >;
+ };
+
+ slimbus_pmx_func: slimbus_pmx_func {
+ pinctrl-single,pins = <
+ 0x02c MUX_M1 /* SLIMBUS_CLK */
+ 0x030 MUX_M1 /* SLIMBUS_DATA */
+ >;
+ };
+
+ i2c0_pmx_func: i2c0_pmx_func {
+ pinctrl-single,pins = <
+ 0x014 MUX_M1 /* I2C0_SCL */
+ 0x018 MUX_M1 /* I2C0_SDA */
+ >;
+ };
+
+ i2c1_pmx_func: i2c1_pmx_func {
+ pinctrl-single,pins = <
+ 0x01c MUX_M1 /* I2C1_SCL */
+ 0x020 MUX_M1 /* I2C1_SDA */
+ >;
+ };
+
+ i2c7_pmx_func: i2c7_pmx_func {
+ pinctrl-single,pins = <
+ 0x024 MUX_M3 /* I2C7_SCL */
+ 0x028 MUX_M3 /* I2C7_SDA */
+ >;
+ };
+
+ pcie_pmx_func: pcie_pmx_func {
+ pinctrl-single,pins = <
+ 0x084 MUX_M1 /* PCIE_CLKREQ_N */
+ 0x088 MUX_M1 /* PCIE_WAKE_N */
+ >;
+ };
+
+ spi2_pmx_func: spi2_pmx_func {
+ pinctrl-single,pins = <
+ 0x08c MUX_M1 /* SPI2_CLK */
+ 0x090 MUX_M1 /* SPI2_DI */
+ 0x094 MUX_M1 /* SPI2_DO */
+ 0x098 MUX_M1 /* SPI2_CS0_N */
+ >;
+ };
+
+ i2s0_pmx_func: i2s0_pmx_func {
+ pinctrl-single,pins = <
+ 0x034 MUX_M1 /* I2S0_DI */
+ 0x038 MUX_M1 /* I2S0_DO */
+ 0x03c MUX_M1 /* I2S0_XCLK */
+ 0x040 MUX_M1 /* I2S0_XFS */
+ >;
+ };
+ };
+
+ pmx5: pinmux@e896c800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xe896c800 0x0 0x200>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+
+ pmu_cfg_func: pmu_cfg_func {
+ pinctrl-single,pins = <
+ 0x010 0x0 /* PMU1_SSI */
+ 0x014 0x0 /* PMU2_SSI */
+ 0x018 0x0 /* PMU_CLKOUT */
+ 0x10c 0x0 /* PMU_HKADC_SSI */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_06MA DRIVE6_MASK
+ >;
+ };
+
+ i2c3_cfg_func: i2c3_cfg_func {
+ pinctrl-single,pins = <
+ 0x038 0x0 /* I2C3_SCL */
+ 0x03c 0x0 /* I2C3_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ csi0_pwd_n_cfg_func: csi0_pwd_n_cfg_func {
+ pinctrl-single,pins = <
+ 0x050 0x0 /* CSI0_PWD_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ csi1_pwd_n_cfg_func: csi1_pwd_n_cfg_func {
+ pinctrl-single,pins = <
+ 0x058 0x0 /* CSI1_PWD_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ isp0_cfg_func: isp0_cfg_func {
+ pinctrl-single,pins = <
+ 0x064 0x0 /* ISP_CLK0 */
+ 0x070 0x0 /* ISP_SCL0 */
+ 0x074 0x0 /* ISP_SDA0 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK>;
+ };
+
+ isp1_cfg_func: isp1_cfg_func {
+ pinctrl-single,pins = <
+ 0x068 0x0 /* ISP_CLK1 */
+ 0x078 0x0 /* ISP_SCL1 */
+ 0x07c 0x0 /* ISP_SDA1 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ pwr_key_cfg_func: pwr_key_cfg_func {
+ pinctrl-single,pins = <
+ 0x08c 0x0 /* GPIO_034 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart1_cfg_func: uart1_cfg_func {
+ pinctrl-single,pins = <
+ 0x0b4 0x0 /* UART1_RXD */
+ 0x0b8 0x0 /* UART1_TXD */
+ 0x0bc 0x0 /* UART1_CTS_N */
+ 0x0c0 0x0 /* UART1_RTS_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart2_cfg_func: uart2_cfg_func {
+ pinctrl-single,pins = <
+ 0x0c8 0x0 /* UART2_CTS_N */
+ 0x0cc 0x0 /* UART2_RTS_N */
+ 0x0d0 0x0 /* UART2_TXD */
+ 0x0d4 0x0 /* UART2_RXD */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart5_cfg_func: uart5_cfg_func {
+ pinctrl-single,pins = <
+ 0x0c8 0x0 /* UART5_RXD */
+ 0x0cc 0x0 /* UART5_TXD */
+ 0x0d0 0x0 /* UART5_CTS_N */
+ 0x0d4 0x0 /* UART5_RTS_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ cam0_rst_cfg_func: cam0_rst_cfg_func {
+ pinctrl-single,pins = <
+ 0x0d4 0x0 /* CAM0_RST */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ uart0_cfg_func: uart0_cfg_func {
+ pinctrl-single,pins = <
+ 0x0d8 0x0 /* UART0_RXD */
+ 0x0dc 0x0 /* UART0_TXD */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart6_cfg_func: uart6_cfg_func {
+ pinctrl-single,pins = <
+ 0x0d8 0x0 /* UART6_CTS_N */
+ 0x0dc 0x0 /* UART6_RTS_N */
+ 0x0e0 0x0 /* UART6_RXD */
+ 0x0e4 0x0 /* UART6_TXD */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart3_cfg_func: uart3_cfg_func {
+ pinctrl-single,pins = <
+ 0x0e8 0x0 /* UART3_CTS_N */
+ 0x0ec 0x0 /* UART3_RTS_N */
+ 0x0f0 0x0 /* UART3_RXD */
+ 0x0f4 0x0 /* UART3_TXD */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ uart4_cfg_func: uart4_cfg_func {
+ pinctrl-single,pins = <
+ 0x0f8 0x0 /* UART4_CTS_N */
+ 0x0fc 0x0 /* UART4_RTS_N */
+ 0x100 0x0 /* UART4_RXD */
+ 0x104 0x0 /* UART4_TXD */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ cam1_rst_cfg_func: cam1_rst_cfg_func {
+ pinctrl-single,pins = <
+ 0x130 0x0 /* CAM1_RST */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+ };
+
+ pmx6: pinmux@ff3b6800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xff3b6800 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+
+ ufs_cfg_func: ufs_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* UFS_REF_CLK */
+ 0x004 0x0 /* UFS_RST_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_08MA DRIVE6_MASK
+ >;
+ };
+
+ spi3_cfg_func: spi3_cfg_func {
+ pinctrl-single,pins = <
+ 0x008 0x0 /* SPI3_CLK */
+ 0x00c 0x0 /* SPI3_DI */
+ 0x010 0x0 /* SPI3_DO */
+ 0x014 0x0 /* SPI3_CS0_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+ };
+
+ pmx7: pinmux@ff3fd800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xff3fd800 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+
+ sdio_clk_cfg_func: sdio_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SDIO_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA DRIVE6_MASK
+ >;
+ };
+
+ sdio_cfg_func: sdio_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SDIO_CMD */
+ 0x008 0x0 /* SDIO_DATA0 */
+ 0x00c 0x0 /* SDIO_DATA1 */
+ 0x010 0x0 /* SDIO_DATA2 */
+ 0x014 0x0 /* SDIO_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA DRIVE6_MASK
+ >;
+ };
+ };
+
+ pmx8: pinmux@ff37e800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xff37e800 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+
+ sd_clk_cfg_func: sd_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SD_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA
+ DRIVE6_MASK
+ >;
+ };
+
+ sd_cfg_func: sd_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SD_CMD */
+ 0x008 0x0 /* SD_DATA0 */
+ 0x00c 0x0 /* SD_DATA1 */
+ 0x010 0x0 /* SD_DATA2 */
+ 0x014 0x0 /* SD_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA
+ DRIVE6_MASK
+ >;
+ };
+ };
+
+ pmx9: pinmux@fff11800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xfff11800 0x0 0xbc>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+
+ i2c0_cfg_func: i2c0_cfg_func {
+ pinctrl-single,pins = <
+ 0x01c 0x0 /* I2C0_SCL */
+ 0x020 0x0 /* I2C0_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ i2c1_cfg_func: i2c1_cfg_func {
+ pinctrl-single,pins = <
+ 0x024 0x0 /* I2C1_SCL */
+ 0x028 0x0 /* I2C1_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ i2c7_cfg_func: i2c7_cfg_func {
+ pinctrl-single,pins = <
+ 0x02c 0x0 /* I2C7_SCL */
+ 0x030 0x0 /* I2C7_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ slimbus_cfg_func: slimbus_cfg_func {
+ pinctrl-single,pins = <
+ 0x034 0x0 /* SLIMBUS_CLK */
+ 0x038 0x0 /* SLIMBUS_DATA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ i2s0_cfg_func: i2s0_cfg_func {
+ pinctrl-single,pins = <
+ 0x040 0x0 /* I2S0_DI */
+ 0x044 0x0 /* I2S0_DO */
+ 0x048 0x0 /* I2S0_XCLK */
+ 0x04c 0x0 /* I2S0_XFS */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ i2s2_cfg_func: i2s2_cfg_func {
+ pinctrl-single,pins = <
+ 0x050 0x0 /* I2S2_DI */
+ 0x054 0x0 /* I2S2_DO */
+ 0x058 0x0 /* I2S2_XCLK */
+ 0x05c 0x0 /* I2S2_XFS */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ pcie_cfg_func: pcie_cfg_func {
+ pinctrl-single,pins = <
+ 0x094 0x0 /* PCIE_CLKREQ_N */
+ 0x098 0x0 /* PCIE_WAKE_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ spi2_cfg_func: spi2_cfg_func {
+ pinctrl-single,pins = <
+ 0x09c 0x0 /* SPI2_CLK */
+ 0x0a0 0x0 /* SPI2_DI */
+ 0x0a4 0x0 /* SPI2_DO */
+ 0x0a8 0x0 /* SPI2_CS0_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ usb_cfg_func: usb_cfg_func {
+ pinctrl-single,pins = <
+ 0x0ac 0x0 /* GPIO_219 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hisi_3660_ipc.dtsi b/arch/arm64/boot/dts/hisilicon/hisi_3660_ipc.dtsi
new file mode 100644
index 000000000000..c5e2c5516339
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hisi_3660_ipc.dtsi
@@ -0,0 +1,347 @@
+/*
+ * Hisilicon Ltd. HI3660 SoC
+ *
+ * Copyright (C) 2012-2014 Hisilicon Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * mailbox-allocation-table
+ * ipc-channel SOURCE DESTINATION remark
+ * mailbox-0 LPM3 GIC_1 ADC
+ * mailbox-1 DEFAULT GIC_1 RDR
+ * mailbox-2 HIFI GIC_1 HIFI
+ * mailbox-3 DEFAULT GIC_1 RESERVED
+ * mailbox-4 IOM3 GIC_1 IOM3
+ * mailbox-5 IVP GIC_2 IVP
+ * mailbox-6 DEFAULT GIC_2 RESERVED
+ * mailbox-7 DEFAULT GIC_2 RESERVED
+ * mailbox-8 ISP GIC_2 ISP
+ * mailbox-9 ISP GIC_2 ISP
+ * mailbox-10 GIC_1 IOM3 IOM3
+ * mailbox-11 GIC_1 IOM3 RESERVED
+ * mailbox-12 LPM3 IOM3 LPM3
+ * mailbox-13 GIC_1 LPM3 CLOCK
+ * mailbox-14 GIC_2 LPM3 IP_REGULATOR
+ * mailbox-15 IOM3 LPM3 IOM3
+ * mailbox-16 GIC_1 LPM3 ADC/MODEM PANIC
+ * mailbox-17 GIC_2 LPM3 RDR
+ * mailbox-18 GIC_2 HIFI HIFI
+ * mailbox-19 GIC_1 MODEM-A9 RESERVED
+ * mailbox-20 GIC_1 MODEM-A9 RESERVED
+ * mailbox-21 GIC_1 MODEM-A9 RESERVED
+ * mailbox-22 GIC_1 MODEM-BBE16 RESERVED
+ * mailbox-23 GIC_1 ISP ISP
+ * mailbox-24 GIC_1 ISP ISP
+ * mailbox-25 GIC_1 IVP32 IVP
+ * mailbox-26 GIC_1 IVP32 IVP
+ * mailbox-27 GIC_1 LPM3 HISEE/DDR
+ * mailbox-28 GIC_1 LPM3 TSENSOR
+ * mailbox-29 GIC_1 HIFI BSP_RESET_CORE_NOTIFY
+ * mailbox-30 GIC_COMM COMM RESERVED
+ */
+
+/{
+ hisi_ipc: ipc@e896b000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "hisilicon,HiIPCV230";
+ reg = <0x0 0xe896b000 0x0 0x1000>;
+ interrupts = <0 192 4>, <0 193 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
+ unlock_key = <0x1ACCE551>;
+ capability = <8>;
+ hardware_board_type = <0>; /* 1:udp & fpga 0:others */
+ status = "ok";
+
+ /*
+ * "func"
+ * <mbox_type, is_src_mbox, is_des_mbox>;
+ *
+ * "src_bit" & "des_bit"
+ * <0: GIC_1>;
+ * <1: GIC_2>;
+ * <2: IOM7>;
+ * <3: LPM3>;
+ * <4: HIFI>;
+ * <5: MODEM_A9>;
+ * <6: MODEM_BBE16>;
+ * <7: IVP>;
+ * <8: ISP>;
+ * <9: COMM>;
+ *
+ * "index"
+ * software-encoded index of each ipc-mailbox;
+ * when adding a new IPC, allocate the next block of 100 indexes in its dtsi,
+ * example:
+ * Kernel-PERI-IPC: 0~99; ISP-IPC: 100~199; NEW-IPC: 200~299
+ *
+ * "used"
+ * <1: in use, 0: unused>
+ *
+ * "timeout"
+ * <300> means the timeout is 300ms;
+ * the timeout must be a multiple of 5ms, such as 305;
+ *
+ * "fifo_size"
+ * <8> means the IPC async-send tx_thread_fifo depth is 8;
+ *
+ * "sched_priority"
+ * <1: highest priority, 20: default priority>
+ *
+ * "sched_policy"
+ * <1: SCHED_FIFO, 2: SCHED_RR (2 is the default policy)>
+ *
+ * don't forget to update the "mailboxes" count
+ * when adding or removing a mailbox channel
+ */
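+
+ /*
+ * A hypothetical tx-mailbox sketch (illustration only, not one of the
+ * mailboxes added by this patch) showing the optional fifo_size,
+ * sched_priority and sched_policy properties described above; the
+ * index value 99 and the des_bit value are made up for the example:
+ *
+ * mailbox-example {
+ * compatible = "HiIPCV230,tx-mailbox-fast";
+ * func = <1 1 0>;
+ * src_bit = <0>;
+ * des_bit = <3>;
+ * index = <99>;
+ * used = <0>;
+ * timeout = <300>;
+ * fifo_size = <8>;
+ * sched_priority = <20>;
+ * sched_policy = <2>;
+ * };
+ */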
+ mailboxes = <25>;
+ mailbox-0 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 197 4>;
+ src_bit = <3>;
+ des_bit = <0>;
+ index = <0>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-1 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 198 4>;
+ src_bit = <3>;
+ des_bit = <0>;
+ index = <1>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-2 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 199 4>;
+ src_bit = <4>;
+ des_bit = <0>;
+ index = <2>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-3 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 200 4>;
+ src_bit = <3>;
+ des_bit = <0>;
+ index = <3>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-4 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 201 4>;
+ src_bit = <2>;
+ des_bit = <0>;
+ index = <4>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-5 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 202 4>;
+ src_bit = <7>;
+ des_bit = <1>;
+ index = <5>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-6 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 203 4>;
+ src_bit = <7>;
+ des_bit = <1>;
+ index = <6>;
+ used = <0>;
+ timeout = <300>;
+ };
+ mailbox-7 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 204 4>;
+ src_bit = <3>;
+ des_bit = <1>;
+ index = <7>;
+ used = <0>;
+ timeout = <300>;
+ };
+ mailbox-8 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 205 4>;
+ src_bit = <8>;
+ des_bit = <1>;
+ index = <8>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-9 {
+ compatible = "HiIPCV230,rx-mailbox-fast";
+ func = <1 0 1>;
+ interrupts = <0 206 4>;
+ src_bit = <8>;
+ des_bit = <1>;
+ index = <9>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-11 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <2>;
+ index = <11>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-12 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <2>;
+ index = <12>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-14 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <1>;
+ des_bit = <3>;
+ index = <14>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-15 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <3>;
+ index = <15>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-17 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <1>;
+ des_bit = <3>;
+ index = <17>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-18 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <1>;
+ des_bit = <4>;
+ index = <18>;
+ used = <1>;
+ timeout = <300>;
+ };
+ mailbox-19 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <5>;
+ index = <19>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-20 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <5>;
+ index = <20>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-21 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <5>;
+ index = <21>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-22 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <6>;
+ index = <22>;
+ used = <0>;
+ timeout = <300>;
+ };
+ mailbox-23 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <8>;
+ index = <23>;
+ used = <1>;
+ timeout = <300>;
+ };
+
+ mailbox-24 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <8>;
+ index = <24>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-26 {
+ compatible = "HiIPCV230,tx-mailbox-fast";
+ func = <1 1 0>;
+ src_bit = <0>;
+ des_bit = <7>;
+ index = <26>;
+ used = <0>;
+ timeout = <300>;
+ };
+
+ mailbox-30 {
+ compatible = "HiIPCV230,mailbox-comm";
+ func = <1 1 0>;
+ src_bit = <1>;
+ des_bit = <3>;
+ index = <30>;
+ used = <0>;
+ timeout = <300>;
+ };
+ };
+};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dab2cb0c1f1c..e8ca5d85f41d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -262,6 +262,7 @@ CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_MAX77620=y
CONFIG_POWER_RESET_MSM=y
CONFIG_BATTERY_BQ27XXX=y
+CONFIG_POWER_RESET_HISI=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_SENSORS_LM90=m
diff --git a/arch/arm64/configs/hikey960_defconfig b/arch/arm64/configs/hikey960_defconfig
new file mode 100644
index 000000000000..8613e46f3ad7
--- /dev/null
+++ b/arch/arm64/configs/hikey960_defconfig
@@ -0,0 +1,545 @@
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SCHED_WALT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=19
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_SCHED_MC=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_HISI_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CONNECTOR=y
+CONFIG_OF_OVERLAY_MGR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_HUB_USB5734=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_HI3660=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_XGENE=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_HISI=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_HISI_THERMAL=y
+CONFIG_HI3660_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_MALI_DEVFREQ=y
+CONFIG_DRM=y
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_KIRIN_960=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_SIMPLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_DEBUG_VERBOSE=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_I2S_HISI_I2S=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_ISP1760=y
+CONFIG_TCPC_CLASS=y
+CONFIG_USB_POWER_DELIVERY=y
+CONFIG_TCPC_RT1711H=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_QUALCOMM=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_GADGETFS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=64
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_REGULATOR=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_DMADEVICES=y
+CONFIG_HISI_ASP_DMA=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_HISI=y
+CONFIG_FIQ_DEBUGGER_NO_SLEEP=y
+CONFIG_FIQ_DEBUGGER_CONSOLE=y
+CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y
+CONFIG_FIQ_DEBUGGER_UART_OVERLAY=y
+CONFIG_NANOHUB=y
+CONFIG_NANOHUB_SPI=y
+CONFIG_HISI_FIQ_DEBUGGER=y
+CONFIG_STUB_CLK_HI6220=y
+CONFIG_STUB_CLK_HI3660=y
+CONFIG_MAILBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_HI3660_MBOX=y
+CONFIG_HISI_IOMMU_LPAE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_HISI_DDR_DEVFREQ=y
+CONFIG_EXTCON=y
+CONFIG_MALI_MIDGARD=y
+CONFIG_MALI_EXPERT=y
+CONFIG_MALI_PLATFORM_FAKE=y
+CONFIG_MALI_PLATFORM_HISILICON=y
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_HISILICON_PLATFORM=y
+CONFIG_HISILICON_PLATFORM_MAILBOX=y
+CONFIG_HISI_MAILBOX=y
+CONFIG_HISI_RPROC=y
+CONFIG_HIFI_DSP_ONE_TRACK=y
+CONFIG_HIFI_MAILBOX=y
+CONFIG_HIFI_IPC=y
+CONFIG_HIFI_IPC_3660=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_ACPI=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_F2FS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_CRC_T10DIF=y
diff --git a/arch/arm64/configs/hikey_defconfig b/arch/arm64/configs/hikey_defconfig
new file mode 100644
index 000000000000..bab952bd0a2e
--- /dev/null
+++ b/arch/arm64/configs/hikey_defconfig
@@ -0,0 +1,575 @@
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SCHED_WALT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_XGENE=y
+CONFIG_SCHED_MC=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_IMG_DTB=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="hisilicon/hi6220-hikey"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CONNECTOR=y
+CONFIG_OF_OVERLAY_MGR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_SYS_STATS=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_XGENE=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_HISI=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_HISI_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_MEDIA_TUNER_SIMPLE=y
+CONFIG_MEDIA_TUNER_TDA8290=y
+CONFIG_MEDIA_TUNER_TEA5761=y
+CONFIG_MEDIA_TUNER_TEA5767=y
+CONFIG_MEDIA_TUNER_MSI001=y
+CONFIG_MEDIA_TUNER_MT20XX=y
+CONFIG_MEDIA_TUNER_MT2060=y
+CONFIG_MEDIA_TUNER_MT2063=y
+CONFIG_MEDIA_TUNER_MT2266=y
+CONFIG_MEDIA_TUNER_MT2131=y
+CONFIG_MEDIA_TUNER_QT1010=y
+CONFIG_MEDIA_TUNER_XC2028=y
+CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_MEDIA_TUNER_XC4000=y
+CONFIG_MEDIA_TUNER_MXL5005S=y
+CONFIG_MEDIA_TUNER_MXL5007T=y
+CONFIG_MEDIA_TUNER_MC44S803=y
+CONFIG_MEDIA_TUNER_MAX2165=y
+CONFIG_MEDIA_TUNER_TDA18218=y
+CONFIG_MEDIA_TUNER_FC0011=y
+CONFIG_MEDIA_TUNER_FC0012=y
+CONFIG_MEDIA_TUNER_FC0013=y
+CONFIG_MEDIA_TUNER_TDA18212=y
+CONFIG_MEDIA_TUNER_E4000=y
+CONFIG_MEDIA_TUNER_FC2580=y
+CONFIG_MEDIA_TUNER_M88RS6000T=y
+CONFIG_MEDIA_TUNER_TUA9001=y
+CONFIG_MEDIA_TUNER_SI2157=y
+CONFIG_MEDIA_TUNER_IT913X=y
+CONFIG_MEDIA_TUNER_R820T=y
+CONFIG_MEDIA_TUNER_MXL301RF=y
+CONFIG_MEDIA_TUNER_QM1D1C0042=y
+CONFIG_DVB_AU8522_V4L=y
+CONFIG_DVB_TUNER_DIB0070=y
+CONFIG_DVB_TUNER_DIB0090=y
+CONFIG_MALI400=y
+CONFIG_MALI450=y
+CONFIG_MALI400_DEBUG=y
+# CONFIG_MALI400_PROFILING is not set
+# CONFIG_MALI_DVFS is not set
+CONFIG_MALI_SHARED_INTERRUPTS=y
+CONFIG_MALI_DT=y
+CONFIG_MALI_PLAT_SPECIFIC_DT=y
+CONFIG_DRM=y
+CONFIG_DRM_CMA_FBDEV_BUFFER_NUM=2
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_HISI_KIRIN=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_SIMPLE=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_DEBUG_VERBOSE=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_I2S_HI6210_I2S=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_QUALCOMM=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_GADGETFS=y
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=64
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_REGULATOR=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_DMADEVICES=y
+CONFIG_K3_DMA=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_DUMMY=y
+CONFIG_FIQ_DEBUGGER_NO_SLEEP=y
+CONFIG_FIQ_DEBUGGER_CONSOLE=y
+CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y
+CONFIG_FIQ_DEBUGGER_UART_OVERLAY=y
+CONFIG_NANOHUB=y
+CONFIG_NANOHUB_SPI=y
+CONFIG_HISI_FIQ_DEBUGGER=y
+CONFIG_STUB_CLK_HI6220=y
+CONFIG_MAILBOX=y
+CONFIG_HI6220_MBOX=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXTCON=y
+CONFIG_EXTCON_GPIO=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRC_T10DIF=y
diff --git a/arch/arm64/include/asm/fiq_glue.h b/arch/arm64/include/asm/fiq_glue.h
new file mode 100644
index 000000000000..3a3b43b0ed8a
--- /dev/null
+++ b/arch/arm64/include/asm/fiq_glue.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+ struct fiq_glue_handler *next;
+ void (*fiq)(struct fiq_glue_handler *h, const struct pt_regs *regs,
+ void *svc_sp);
+ void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#endif
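The header above only declares the FIQ glue hooks; no caller appears in this hunk. As a rough, purely illustrative sketch (the names below are hypothetical, not part of this patch), a client such as a FIQ debugger would fill in the two callbacks and register them once at boot:

#include <linux/init.h>
#include <linux/ptrace.h>
#include <asm/fiq_glue.h>

/* Hypothetical client of the fiq_glue API declared above. */
static void example_fiq(struct fiq_glue_handler *h,
			const struct pt_regs *regs, void *svc_sp)
{
	/* Runs in FIQ context: no sleeping, no ordinary spinlocks. */
}

static void example_resume(struct fiq_glue_handler *h)
{
	/* Re-arm FIQ-related hardware state after a CPU resume. */
}

static struct fiq_glue_handler example_handler = {
	.fiq	= example_fiq,
	.resume	= example_resume,
};

static int __init example_fiq_init(void)
{
	return fiq_glue_register_handler(&example_handler);
}
late_initcall(example_fiq_init);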
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index bfbd9e90fa04..21c54aa4e528 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -623,8 +623,10 @@ void update_cpu_features(int cpu,
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
*/
- WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
- "Unsupported CPU feature variation.\n");
+ if (taint) {
+ pr_warn_once("Unsupported CPU feature variation detected.\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
}
u64 read_system_reg(u32 id)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 1bec41b5fda3..79a604e867d6 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -138,6 +138,10 @@ ENTRY(_cpu_resume)
bl kasan_unpoison_task_stack_below
#endif
+#ifdef CONFIG_TRUSTY_FIQ
+ bl trusty_fiq_cpu_resume
+#endif
+
ldp x19, x20, [x29, #16]
ldp x21, x22, [x29, #32]
ldp x23, x24, [x29, #48]
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a70f7d3361c4..a843934c95e4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/irqdomain.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -73,9 +74,13 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ IPI_WAKEUP,
+ IPI_CUSTOM_FIRST,
+ IPI_CUSTOM_LAST = 15,
};
+struct irq_domain *ipi_custom_irq_domain;
+
#ifdef CONFIG_ARM64_VHE
/* Whether the boot CPU is running in HYP mode or not*/
@@ -874,7 +879,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#endif
default:
- pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
+ if (ipi_custom_irq_domain &&
+ ipinr >= IPI_CUSTOM_FIRST && ipinr <= IPI_CUSTOM_LAST)
+ handle_domain_irq(ipi_custom_irq_domain, ipinr, regs);
+ else
+ pr_crit("CPU%u: Unknown IPI message 0x%x\n",
+ cpu, ipinr);
break;
}
@@ -883,6 +893,85 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
set_irq_regs(old_regs);
}
+static void custom_ipi_enable(struct irq_data *data)
+{
+ /*
+ * Always trigger a new ipi on enable. This only works for clients
+ * that then clear the ipi before unmasking interrupts.
+ */
+ smp_cross_call(cpumask_of(smp_processor_id()), data->hwirq);
+}
+
+static void custom_ipi_disable(struct irq_data *data)
+{
+}
+
+static struct irq_chip custom_ipi_chip = {
+ .name = "CustomIPI",
+ .irq_enable = custom_ipi_enable,
+ .irq_disable = custom_ipi_disable,
+};
+
+static void handle_custom_ipi_irq(struct irq_desc *desc)
+{
+ if (!desc->action) {
+ pr_crit("CPU%u: Unknown IPI message 0x%x, no custom handler\n",
+ smp_processor_id(), irq_desc_get_irq(desc));
+ return;
+ }
+
+ if (!cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled))
+ return; /* IPIs may not be maskable in hardware */
+
+ handle_percpu_devid_irq(desc);
+}
+
+static int custom_ipi_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < IPI_CUSTOM_FIRST || hw > IPI_CUSTOM_LAST) {
+ pr_err("hwirq-%u is not in supported range for CustomIPI IRQ domain\n",
+ (uint)hw);
+ return -EINVAL;
+ }
+
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &custom_ipi_chip, handle_custom_ipi_irq);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
+}
+
+static const struct irq_domain_ops custom_ipi_domain_ops = {
+ .map = custom_ipi_domain_map,
+};
+
+static int __init smp_custom_ipi_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "android,CustomIPI");
+ if (np) {
+ /*
+ * Register a linear irq domain covering the whole IPI range,
+ * even though only part of it is used.  The proper range check
+ * is done by the domain's mapping routine.
+ */
+ pr_info("Initializing CustomIPI irq domain\n");
+ ipi_custom_irq_domain =
+ irq_domain_add_linear(np,
+ IPI_CUSTOM_LAST + 1,
+ &custom_ipi_domain_ops,
+ NULL);
+ WARN_ON(!ipi_custom_irq_domain);
+ return 0;
+ }
+
+ return 0;
+}
+core_initcall(smp_custom_ipi_init);
+
void smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
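The change above only installs the "android,CustomIPI" irq domain; its consumers live elsewhere in this series (the Trusty FIQ/IRQ support). As a hedged sketch of the consumer side, with hypothetical names, a driver whose node uses the CustomIPI node as interrupt-parent would map the interrupt and request it per CPU; note that the map hook sets IRQ_NOAUTOEN and that custom_ipi_enable() raises the IPI on the enabling CPU:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* Hypothetical consumer of the CustomIPI irq domain registered above. */
static irqreturn_t example_custom_ipi_handler(int irq, void *dev_id)
{
	/* Per-CPU handler; the IPI itself is raised via smp_cross_call(). */
	return IRQ_HANDLED;
}

/* @np: consumer node whose interrupt-parent is the "android,CustomIPI" node */
static int example_request_custom_ipi(struct device_node *np,
				      void __percpu *dev_id)
{
	int irq, ret;

	irq = of_irq_get(np, 0);	/* hwirq must be in the custom range */
	if (irq <= 0)
		return irq ? irq : -ENODEV;

	ret = request_percpu_irq(irq, example_custom_ipi_handler,
				 "example-custom-ipi", dev_id);
	if (ret)
		return ret;

	/* IRQ_NOAUTOEN is set in the map hook, so each CPU that wants the
	 * IPI enables it on itself; this also triggers one initial IPI. */
	enable_percpu_irq(irq, 0);
	return 0;
}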
diff --git a/build.config b/build.config
new file mode 100644
index 000000000000..b0ae146fcf96
--- /dev/null
+++ b/build.config
@@ -0,0 +1,14 @@
+ARCH=arm64
+BRANCH=mirror-aosp-android-hikey-linaro-4.9
+CROSS_COMPILE=aarch64-linux-android-
+DEFCONFIG=hikey_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=hikey-linaro
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+FILES="
+arch/arm64/boot/Image
+arch/arm64/boot/Image-dtb
+arch/arm64/boot/dts/hisilicon/hi6220-hikey.dtb
+vmlinux
+System.map
+"
diff --git a/build.config.hikey960 b/build.config.hikey960
new file mode 100644
index 000000000000..48c3ce3a8db9
--- /dev/null
+++ b/build.config.hikey960
@@ -0,0 +1,14 @@
+ARCH=arm64
+BRANCH=mirror-aosp-android-hikey-linaro-4.9
+CROSS_COMPILE=aarch64-linux-android-
+DEFCONFIG=hikey960_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=hikey-linaro
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+FILES="
+arch/arm64/boot/Image
+arch/arm64/boot/Image.gz
+arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dtb
+vmlinux
+System.map
+"
diff --git a/build.config.net_test b/build.config.net_test
new file mode 100644
index 000000000000..e1808d938ee5
--- /dev/null
+++ b/build.config.net_test
@@ -0,0 +1,9 @@
+ARCH=um
+SUBARCH=x86_64
+CROSS_COMPILE=
+EXTRA_CMDS=''
+KERNEL_DIR=hikey-linaro
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=
+FILES="
+linux
+"
diff --git a/build_test.sh b/build_test.sh
new file mode 100755
index 000000000000..654e65c39e18
--- /dev/null
+++ b/build_test.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Usage:
+# build/build_test.sh
+
+export MAKE_ARGS=$@
+export ROOT_DIR=$(dirname $(readlink -f $0))
+export NET_TEST=${ROOT_DIR}/../kernel/tests/net/test
+export BUILD_CONFIG=build/build.config.net_test
+
+test=all_tests.sh
+set -e
+source ${ROOT_DIR}/envsetup.sh
+
+echo "========================================================"
+echo " Building kernel and running tests "
+
+cd ${KERNEL_DIR}
+$NET_TEST/run_net_test.sh --builder $test
+echo $?
+echo "======Finished running tests======"
diff --git a/drivers/Kconfig b/drivers/Kconfig
index de581c13ec9a..4931ada070a8 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -76,6 +76,8 @@ source "drivers/hwmon/Kconfig"
source "drivers/thermal/Kconfig"
+source "drivers/trusty/Kconfig"
+
source "drivers/watchdog/Kconfig"
source "drivers/ssb/Kconfig"
@@ -174,6 +176,8 @@ source "drivers/ipack/Kconfig"
source "drivers/reset/Kconfig"
+source "drivers/gpu/arm_gpu/Kconfig"
+
source "drivers/fmc/Kconfig"
source "drivers/phy/Kconfig"
@@ -200,6 +204,8 @@ source "drivers/hwtracing/stm/Kconfig"
source "drivers/hwtracing/intel_th/Kconfig"
+source "drivers/hisi/Kconfig"
+
source "drivers/fpga/Kconfig"
source "drivers/tee/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 9a90575c3f38..606569e0344f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_W1) += w1/
obj-y += power/
obj-$(CONFIG_HWMON) += hwmon/
obj-$(CONFIG_THERMAL) += thermal/
+obj-$(CONFIG_TRUSTY) += trusty/
obj-$(CONFIG_WATCHDOG) += watchdog/
obj-$(CONFIG_MD) += md/
obj-$(CONFIG_BT) += bluetooth/
@@ -175,3 +176,4 @@ obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_TEE) += tee/
+obj-$(CONFIG_HISILICON_PLATFORM)+= hisi/
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b1fc29a697b7..8a84c2060311 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -29,6 +29,7 @@ btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
hci_uart-y := hci_ldisc.o
+hci_uart-$(CONFIG_SERIAL_DEV_BUS) += hci_serdev.o
hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index b6bb58c41df5..e1d087e1fbea 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -30,6 +30,7 @@
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
+#include <linux/of.h>
/* Bluetooth Driver Version */
#define VERSION "1.0"
@@ -275,6 +276,14 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static const struct of_device_id btwilink_of_match[] = {
+ {
+ .compatible = "btwilink",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, btwilink_of_match);
+
static int bt_ti_probe(struct platform_device *pdev)
{
static struct ti_st *hst;
@@ -338,6 +347,8 @@ static struct platform_driver btwilink_driver = {
.remove = bt_ti_remove,
.driver = {
.name = "btwilink",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(btwilink_of_match),
},
};
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 9497c469efd2..7b4278516a49 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -134,6 +134,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
return 0;
}
+EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup);
static void hci_uart_write_work(struct work_struct *work)
{
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 02692fe30279..200288c87fc4 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -34,20 +34,24 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/ioctl.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
#include <linux/skbuff.h>
+#include <linux/ti_wilink_st.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <linux/gpio/consumer.h>
#include "hci_uart.h"
@@ -76,6 +80,12 @@ struct hcill_cmd {
u8 cmd;
} __packed;
+struct ll_device {
+ struct hci_uart hu;
+ struct serdev_device *serdev;
+ struct gpio_desc *enable_gpio;
+};
+
struct ll_struct {
unsigned long rx_state;
unsigned long rx_count;
@@ -136,6 +146,9 @@ static int ll_open(struct hci_uart *hu)
hu->priv = ll;
+ if (hu->serdev)
+ serdev_device_open(hu->serdev);
+
return 0;
}
@@ -164,6 +177,13 @@ static int ll_close(struct hci_uart *hu)
kfree_skb(ll->rx_skb);
+ if (hu->serdev) {
+ struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(lldev->enable_gpio, 0);
+
+ serdev_device_close(hu->serdev);
+ }
+
hu->priv = NULL;
kfree(ll);
@@ -505,9 +525,245 @@ static struct sk_buff *ll_dequeue(struct hci_uart *hu)
return skb_dequeue(&ll->txq);
}
+#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
+static int read_local_version(struct hci_dev *hdev)
+{
+ int err = 0;
+ unsigned short version = 0;
+ struct sk_buff *skb;
+ struct hci_rp_read_local_version *ver;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading TI version information failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ if (skb->len != sizeof(*ver)) {
+ err = -EILSEQ;
+ goto out;
+ }
+
+ ver = (struct hci_rp_read_local_version *)skb->data;
+ if (le16_to_cpu(ver->manufacturer) != 13) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ version = le16_to_cpu(ver->lmp_subver);
+
+out:
+ if (err)
+ bt_dev_err(hdev, "Failed to read TI version info: %d", err);
+ kfree_skb(skb);
+ return err ? err : version;
+}
+
+/**
+ * download_firmware -
+ * internal function that parses the .bts firmware script file and
+ * interprets only SEND and DELAY actions for now
+ */
+static int download_firmware(struct ll_device *lldev)
+{
+ unsigned short chip, min_ver, maj_ver;
+ int version, err, len;
+ unsigned char *ptr, *action_ptr;
+ unsigned char bts_scr_name[40]; /* 40 char long bts scr name? */
+ const struct firmware *fw;
+ struct sk_buff *skb;
+ struct hci_command *cmd;
+
+ version = read_local_version(lldev->hu.hdev);
+ if (version < 0)
+ return version;
+
+ chip = (version & 0x7C00) >> 10;
+ min_ver = (version & 0x007F);
+ maj_ver = (version & 0x0380) >> 7;
+ if (version & 0x8000)
+ maj_ver |= 0x0008;
+
+ snprintf(bts_scr_name, sizeof(bts_scr_name),
+ "ti-connectivity/TIInit_%d.%d.%d.bts",
+ chip, maj_ver, min_ver);
+
+ err = request_firmware(&fw, bts_scr_name, &lldev->serdev->dev);
+ if (err || !fw->data || !fw->size) {
+ bt_dev_err(lldev->hu.hdev, "request_firmware failed(errno %d) for %s",
+ err, bts_scr_name);
+ return -EINVAL;
+ }
+ ptr = (void *)fw->data;
+ len = fw->size;
+ /* skip the bts_header (magic number and version) */
+ ptr += sizeof(struct bts_header);
+ len -= sizeof(struct bts_header);
+
+ while (len > 0 && ptr) {
+ bt_dev_dbg(lldev->hu.hdev, " action size %d, type %d ",
+ ((struct bts_action *)ptr)->size,
+ ((struct bts_action *)ptr)->type);
+
+ action_ptr = &(((struct bts_action *)ptr)->data[0]);
+
+ switch (((struct bts_action *)ptr)->type) {
+ case ACTION_SEND_COMMAND: /* action send */
+ bt_dev_dbg(lldev->hu.hdev, "S");
+ cmd = (struct hci_command *)action_ptr;
+ if (cmd->opcode == 0xff36) {
+ /* ignore the change remote baud rate HCI VS command */
+ bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware");
+ break;
+ }
+ if (cmd->prefix != 1)
+ bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+
+ skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(lldev->hu.hdev, "send command failed\n");
+ err = PTR_ERR(skb);
+ goto out_rel_fw;
+ }
+ kfree_skb(skb);
+ break;
+ case ACTION_WAIT_EVENT: /* wait */
+ /* no need to wait as command was synchronous */
+ bt_dev_dbg(lldev->hu.hdev, "W");
+ break;
+ case ACTION_DELAY: /* sleep */
+ bt_dev_info(lldev->hu.hdev, "sleep command in scr");
+ mdelay(((struct bts_action_delay *)action_ptr)->msec);
+ break;
+ }
+ len -= (sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size);
+ ptr += sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size;
+ }
+
+out_rel_fw:
+ /* fw download complete */
+ release_firmware(fw);
+ return err;
+}
+
+static int ll_setup(struct hci_uart *hu)
+{
+ int err, retry = 3;
+ struct ll_device *lldev;
+ struct serdev_device *serdev = hu->serdev;
+ u32 speed;
+
+ if (!serdev)
+ return 0;
+
+ lldev = serdev_device_get_drvdata(serdev);
+
+ serdev_device_set_flow_control(serdev, true);
+
+ do {
+ /* Pulse BT_EN low, then drive it HIGH to power up the controller */
+ gpiod_set_value_cansleep(lldev->enable_gpio, 0);
+ msleep(5);
+ gpiod_set_value_cansleep(lldev->enable_gpio, 1);
+ msleep(100);
+
+ err = download_firmware(lldev);
+ if (!err)
+ break;
+
+ /* Toggle BT_EN and retry */
+ bt_dev_err(hu->hdev, "download firmware failed, retrying...");
+ } while (retry--);
+
+ if (err)
+ return err;
+
+ /* Operational speed if any */
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
+ if (speed) {
+ struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb)) {
+ kfree_skb(skb);
+ serdev_device_set_baudrate(serdev, speed);
+ }
+ }
+
+ return 0;
+}
+
+static const struct hci_uart_proto llp;
+
+static int hci_ti_probe(struct serdev_device *serdev)
+{
+ struct hci_uart *hu;
+ struct ll_device *lldev;
+ u32 max_speed = 3000000;
+
+ lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
+ if (!lldev)
+ return -ENOMEM;
+ hu = &lldev->hu;
+
+ serdev_device_set_drvdata(serdev, lldev);
+ lldev->serdev = hu->serdev = serdev;
+
+ lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lldev->enable_gpio))
+ return PTR_ERR(lldev->enable_gpio);
+
+ of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
+ hci_uart_set_speeds(hu, 115200, max_speed);
+
+ return hci_uart_register_device(hu, &llp);
+}
+
+static void hci_ti_remove(struct serdev_device *serdev)
+{
+ struct ll_device *lldev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &lldev->hu;
+ struct hci_dev *hdev = hu->hdev;
+
+ cancel_work_sync(&hu->write_work);
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ hu->proto->close(hu);
+}
+
+static const struct of_device_id hci_ti_of_match[] = {
+ { .compatible = "ti,wl1831-st" },
+ { .compatible = "ti,wl1835-st" },
+ { .compatible = "ti,wl1837-st" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hci_ti_of_match);
+
+static struct serdev_device_driver hci_ti_drv = {
+ .driver = {
+ .name = "hci-ti",
+ .of_match_table = of_match_ptr(hci_ti_of_match),
+ },
+ .probe = hci_ti_probe,
+ .remove = hci_ti_remove,
+};
+#else
+#define ll_setup NULL
+#endif
+
static const struct hci_uart_proto llp = {
.id = HCI_UART_LL,
.name = "LL",
+ .setup = ll_setup,
.open = ll_open,
.close = ll_close,
.recv = ll_recv,
@@ -518,10 +774,14 @@ static const struct hci_uart_proto llp = {
int __init ll_init(void)
{
+ serdev_device_driver_register(&hci_ti_drv);
+
return hci_uart_register_proto(&llp);
}
int __exit ll_deinit(void)
{
+ serdev_device_driver_unregister(&hci_ti_drv);
+
return hci_uart_unregister_proto(&llp);
}
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
new file mode 100644
index 000000000000..7de0edc0ff8c
--- /dev/null
+++ b/drivers/bluetooth/hci_serdev.c
@@ -0,0 +1,356 @@
+/*
+ * Bluetooth HCI serdev driver lib
+ *
+ * Copyright (C) 2017 Linaro, Ltd., Rob Herring <robh@kernel.org>
+ *
+ * Based on hci_ldisc.c:
+ *
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
+ * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
+ * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+struct serdev_device_ops hci_serdev_client_ops;
+
+static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
+{
+ struct hci_dev *hdev = hu->hdev;
+
+ /* Update HCI stat counters */
+ switch (pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+}
+
+static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
+{
+ struct sk_buff *skb = hu->tx_skb;
+
+ if (!skb)
+ skb = hu->proto->dequeue(hu);
+ else
+ hu->tx_skb = NULL;
+
+ return skb;
+}
+
+static void hci_uart_write_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
+ struct serdev_device *serdev = hu->serdev;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
+ /* REVISIT:
+ * should we cope with bad skbs or ->write() returning an error value?
+ */
+ do {
+ clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+
+ while ((skb = hci_uart_dequeue(hu))) {
+ int len;
+
+ len = serdev_device_write_buf(serdev,
+ skb->data, skb->len);
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len) {
+ hu->tx_skb = skb;
+ break;
+ }
+
+ hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
+ kfree_skb(skb);
+ }
+ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+}
+
+/* ------- Interface to HCI layer ------ */
+
+/* Initialize device */
+static int hci_uart_open(struct hci_dev *hdev)
+{
+ BT_DBG("%s %p", hdev->name, hdev);
+
+ return 0;
+}
+
+/* Reset device */
+static int hci_uart_flush(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hdev %p serdev %p", hdev, hu->serdev);
+
+ if (hu->tx_skb) {
+ kfree_skb(hu->tx_skb);
+ hu->tx_skb = NULL;
+ }
+
+ /* Flush any pending characters in the driver and discipline. */
+ serdev_device_write_flush(hu->serdev);
+
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ hu->proto->flush(hu);
+
+ return 0;
+}
+
+/* Close device */
+static int hci_uart_close(struct hci_dev *hdev)
+{
+ BT_DBG("hdev %p", hdev);
+
+ hci_uart_flush(hdev);
+ hdev->flush = NULL;
+
+ return 0;
+}
+
+/* Send frames from HCI layer */
+static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
+ skb->len);
+
+ hu->proto->enqueue(hu, skb);
+
+ hci_uart_tx_wakeup(hu);
+
+ return 0;
+}
+
+static int hci_uart_setup(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct hci_rp_read_local_version *ver;
+ struct sk_buff *skb;
+ unsigned int speed;
+ int err;
+
+ /* Init speed if any */
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+ else
+ speed = 0;
+
+ if (speed)
+ serdev_device_set_baudrate(hu->serdev, speed);
+
+ /* Operational speed if any */
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
+ if (hu->proto->set_baudrate && speed) {
+ err = hu->proto->set_baudrate(hu, speed);
+ if (err)
+ BT_ERR("%s: failed to set baudrate", hdev->name);
+ else
+ serdev_device_set_baudrate(hu->serdev, speed);
+ }
+
+ if (hu->proto->setup)
+ return hu->proto->setup(hu);
+
+ if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading local version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return 0;
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: Event length mismatch for version information",
+ hdev->name);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+/** hci_uart_write_wakeup - transmit buffer wakeup
+ * @serdev: serial device
+ *
+ * This function is called by the serdev framework when it can accept
+ * more outgoing data.
+ */
+static void hci_uart_write_wakeup(struct serdev_device *serdev)
+{
+ struct hci_uart *hu = serdev_device_get_drvdata(serdev);
+
+ BT_DBG("");
+
+ if (!hu || serdev != hu->serdev) {
+ WARN_ON(1);
+ return;
+ }
+
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ hci_uart_tx_wakeup(hu);
+}
+
+/** hci_uart_receive_buf - receive buffer wakeup
+ * @serdev: serial device
+ * @data: pointer to received data
+ * @count: count of received data in bytes
+ *
+ * This function is called by the serdev framework when it has received
+ * data in the RX buffer.
+ *
+ * Return: number of processed bytes
+ */
+static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+{
+ struct hci_uart *hu = serdev_device_get_drvdata(serdev);
+
+ if (!hu || serdev != hu->serdev) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ return 0;
+
+ /* No locking is needed here; the serdev caller already serializes
+ * this path with a mutex.
+ */
+ hu->proto->recv(hu, data, count);
+
+ if (hu->hdev)
+ hu->hdev->stat.byte_rx += count;
+
+ return count;
+}
+
+struct serdev_device_ops hci_serdev_client_ops = {
+ .receive_buf = hci_uart_receive_buf,
+ .write_wakeup = hci_uart_write_wakeup,
+};
+
+int hci_uart_register_device(struct hci_uart *hu,
+ const struct hci_uart_proto *p)
+{
+ int err;
+ struct hci_dev *hdev;
+
+ BT_DBG("");
+
+ serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
+
+ err = p->open(hu);
+ if (err)
+ return err;
+
+ hu->proto = p;
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ BT_ERR("Can't allocate HCI device");
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ hu->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, hu);
+
+ INIT_WORK(&hu->write_work, hci_uart_write_work);
+
+ /* Only when vendor specific setup callback is provided, consider
+ * the manufacturer information valid. This avoids filling in the
+ * value for Ericsson when nothing is specified.
+ */
+ if (hu->proto->setup)
+ hdev->manufacturer = hu->proto->manufacturer;
+
+ hdev->open = hci_uart_open;
+ hdev->close = hci_uart_close;
+ hdev->flush = hci_uart_flush;
+ hdev->send = hci_uart_send_frame;
+ hdev->setup = hci_uart_setup;
+ SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
+
+ if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+
+ if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+
+ if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+ hdev->dev_type = HCI_AMP;
+ else
+ hdev->dev_type = HCI_PRIMARY;
+
+ if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return 0;
+
+ if (hci_register_dev(hdev) < 0) {
+ BT_ERR("Can't register HCI device");
+ err = -ENODEV;
+ goto err_register;
+ }
+
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+
+ return 0;
+
+err_register:
+ hci_free_dev(hdev);
+err_alloc:
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ p->close(hu);
+ return err;
+}
+EXPORT_SYMBOL_GPL(hci_uart_register_device);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 070139513e65..ced14f8ca89f 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -58,6 +58,7 @@
#define HCI_UART_VND_DETECT 5
struct hci_uart;
+struct serdev_device;
struct hci_uart_proto {
unsigned int id;
@@ -77,6 +78,7 @@ struct hci_uart_proto {
struct hci_uart {
struct tty_struct *tty;
+ struct serdev_device *serdev;
struct hci_dev *hdev;
unsigned long flags;
unsigned long hdev_flags;
@@ -105,6 +107,8 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
+int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+
int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_tty(struct hci_uart *hu);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 8453a49471d7..843ecfe92e69 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -47,6 +47,7 @@ config SGI_MBCS
say Y or M here, otherwise say N.
source "drivers/tty/serial/Kconfig"
+source "drivers/tty/serdev/Kconfig"
config TTY_PRINTK
tristate "TTY driver to output user messages via printk"
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 3f537a04c6a6..afc21c0a7877 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -6,6 +6,13 @@ config COMMON_CLK_HI3519
help
Build the clock driver for hi3519.
+config COMMON_CLK_HI3660
+ bool "Hi3660 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3660.
+
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -25,3 +32,9 @@ config STUB_CLK_HI6220
depends on COMMON_CLK_HI6220 && MAILBOX
help
Build the Hisilicon Hi6220 stub clock driver.
+
+config STUB_CLK_HI3660
+ bool "Hi3660 Stub Clock Driver"
+ depends on COMMON_CLK_HI3660 && MAILBOX
+ help
+ Build the Hisilicon Hi3660 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index e169ec7da023..5cd15f257f5f 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o
+obj-$(CONFIG_COMMON_CLK_HI3660) += clk-hi3660.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
obj-$(CONFIG_RESET_HISI) += reset.o
obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
+obj-$(CONFIG_STUB_CLK_HI3660) += clk-hi3660-stub.o
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
new file mode 100644
index 000000000000..bb475f8332ef
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -0,0 +1,257 @@
+/*
+ * Hisilicon clock driver
+ *
+ * Copyright (c) 2013-2015 Hisilicon Limited.
+ * Copyright (c) 2017 Linaro Limited.
+ * Copyright (c) 2017 Hisilicon Limited.
+ *
+ * Author: Kai Zhao <zhaokai1@hisilicon.com>
+ * Author: Leo Yan <leo.yan@linaro.org>
+ * Author: Tao Wang <kevin.wangtao@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mailbox_client.h>
+#include <dt-bindings/clock/hi3660-clock.h>
+
+#define FREQ_DATA_OFFSET 0x70
+#define MHZ 1000000
+
+struct hi3660_stub_clk_chan {
+ struct mbox_client cl;
+ struct mbox_chan *mbox;
+};
+
+struct hi3660_stub_clk {
+ unsigned int id;
+ struct device *dev;
+ struct clk_hw hw;
+ const char *clk_name;
+ unsigned int set_rate_cmd;
+ unsigned int msg[8];
+ unsigned int rate;
+};
+
+static void __iomem *freq_reg;
+static struct hi3660_stub_clk_chan *chan;
+
+static struct hi3660_stub_clk hisi_stub_clk[HI3660_CLK_STUB_NUM] = {
+ [HI3660_CLK_STUB_CLUSTER0] = {
+ .id = HI3660_CLK_STUB_CLUSTER0,
+ .clk_name = "cpu-cluster.0",
+ .set_rate_cmd = 0x0001030A,
+ },
+ [HI3660_CLK_STUB_CLUSTER1] = {
+ .id = HI3660_CLK_STUB_CLUSTER1,
+ .clk_name = "cpu-cluster.1",
+ .set_rate_cmd = 0x0002030A,
+ },
+ [HI3660_CLK_STUB_GPU] = {
+ .id = HI3660_CLK_STUB_GPU,
+ .clk_name = "clk-g3d",
+ .set_rate_cmd = 0x0003030A,
+ },
+ [HI3660_CLK_STUB_DDR] = {
+ .id = HI3660_CLK_STUB_DDR,
+ .clk_name = "clk-ddrc",
+ .set_rate_cmd = 0x0004030A,
+ },
+ [HI3660_CLK_STUB_DDR_VOTE] = {
+ .id = HI3660_CLK_STUB_DDR_VOTE,
+ .clk_name = "clk-ddrc-vote",
+ .set_rate_cmd = 0x00040309,
+ },
+ [HI3660_CLK_STUB_DDR_LIMIT] = {
+ .id = HI3660_CLK_STUB_DDR_LIMIT,
+ .clk_name = "clk-ddrc-limit",
+ .set_rate_cmd = 0x00040308,
+ },
+};
+
+static unsigned long hi3660_stub_clk_recalc_rate(
+ struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct hi3660_stub_clk *stub_clk =
+ container_of(hw, struct hi3660_stub_clk, hw);
+
+ if (stub_clk->id < HI3660_CLK_STUB_DDR_VOTE)
+ stub_clk->rate = readl(freq_reg + (stub_clk->id << 2)) * MHZ;
+
+ pr_debug("get rate: clk %d, rate %u\n", stub_clk->id, stub_clk->rate);
+
+ return stub_clk->rate;
+}
+
+static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ pr_debug("%s: enter %ld\n", __func__, req->rate);
+ return 0;
+}
+
+static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hi3660_stub_clk *stub_clk =
+ container_of(hw, struct hi3660_stub_clk, hw);
+
+ if (stub_clk->id < HI3660_CLK_STUB_NUM) {
+ stub_clk->msg[0] = stub_clk->set_rate_cmd;
+ stub_clk->msg[1] = rate / MHZ;
+
+ pr_debug("%s: set_rate_cmd[0] %x [1] %x\n", __func__,
+ stub_clk->msg[0], stub_clk->msg[1]);
+
+ mbox_send_message(chan->mbox, stub_clk->msg);
+ }
+
+ stub_clk->rate = rate;
+ return 0;
+}
+
+static struct clk_ops hi3660_stub_clk_ops = {
+ .recalc_rate = hi3660_stub_clk_recalc_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
+ .round_rate = hi3660_stub_clk_round_rate,
+ .set_rate = hi3660_stub_clk_set_rate,
+};
+
+static struct clk *hi3660_register_stub_clk(struct device *dev,
+ struct hi3660_stub_clk *stub_clk)
+{
+ struct clk_init_data init = {};
+ struct clk *clk;
+
+ stub_clk->hw.init = &init;
+ stub_clk->dev = dev;
+
+ init.name = stub_clk->clk_name;
+ init.ops = &hi3660_stub_clk_ops;
+ init.num_parents = 0;
+ init.flags = CLK_GET_RATE_NOCACHE;
+
+ clk = devm_clk_register(dev, &stub_clk->hw);
+ if (!IS_ERR(clk))
+ dev_dbg(dev, "Registered clock '%s'\n", init.name);
+
+ return clk;
+}
+
+static int hi3660_stub_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_onecell_data *data;
+ struct resource *res;
+ struct clk **clk_table;
+ struct clk *clk;
+ unsigned int idx;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "could not allocate clock data\n");
+ return -ENOMEM;
+ }
+
+ clk_table = devm_kzalloc(dev,
+ sizeof(*clk_table) * HI3660_CLK_STUB_NUM, GFP_KERNEL);
+ if (!clk_table) {
+ dev_err(dev, "could not allocate clock lookup table\n");
+ return -ENOMEM;
+ }
+ data->clks = clk_table;
+ data->clk_num = HI3660_CLK_STUB_NUM;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(dev, "failed to allocate memory for mbox\n");
+ return -ENOMEM;
+ }
+
+ /* Use mailbox client in non-blocking mode */
+ chan->cl.dev = dev;
+ chan->cl.tx_done = NULL;
+ chan->cl.tx_block = false;
+ chan->cl.tx_tout = 500;
+ chan->cl.knows_txdone = false;
+
+ /* Allocate mailbox channel */
+ chan->mbox = mbox_request_channel(&chan->cl, 0);
+ if (IS_ERR(chan->mbox)) {
+ dev_err(dev, "failed to get mailbox channel\n");
+ return PTR_ERR(chan->mbox);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ freq_reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!freq_reg) {
+ dev_err(dev, "failed to map shared memory\n");
+ return -ENOMEM;
+ }
+ freq_reg += FREQ_DATA_OFFSET;
+
+ for (idx = 0; idx < HI3660_CLK_STUB_NUM; idx++) {
+ clk = hi3660_register_stub_clk(dev, &hisi_stub_clk[idx]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ data->clks[idx] = clk;
+ }
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
+
+ return 0;
+}
+
+static const struct of_device_id hi3660_stub_clk_of_match[] = {
+ { .compatible = "hisilicon,hi3660-stub-clk", },
+ {}
+};
+
+static struct platform_driver hi3660_stub_clk_driver = {
+ .driver = {
+ .name = "hi3660-stub-clk",
+ .of_match_table = hi3660_stub_clk_of_match,
+ },
+ .probe = hi3660_stub_clk_probe,
+};
+
+static int __init hi3660_stub_clk_init(void)
+{
+ return platform_driver_register(&hi3660_stub_clk_driver);
+}
+subsys_initcall(hi3660_stub_clk_init);
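The stub clocks registered above are ordinary clk-provider clocks whose set_rate path is just a mailbox message to the power-management MCU, so consumers (cpufreq, devfreq and the like) go through the normal clk API. A minimal, purely illustrative consumer sketch (function name and clock lookup are hypothetical) could be:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: request a new rate on a stub clock such as
 * "cpu-cluster.0", obtained through the consumer's DT "clocks" property. */
static int example_set_stub_rate(struct device *dev, unsigned long rate)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_set_rate() lands in hi3660_stub_clk_set_rate(), which sends
	 * set_rate_cmd plus the rate in MHz over the mailbox; recalc_rate
	 * for the CPU/GPU/DDR clocks reads back from shared memory. */
	return clk_set_rate(clk, rate);
}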
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
new file mode 100644
index 000000000000..039f57cbc34d
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <dt-bindings/clock/hi3660-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+
+static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
+ { HI3660_CLKIN_SYS, "clkin_sys", NULL, 0, 19200000, },
+ { HI3660_CLKIN_REF, "clkin_ref", NULL, 0, 32764, },
+ { HI3660_CLK_FLL_SRC, "clk_fll_src", NULL, 0, 128000000, },
+ { HI3660_CLK_PPLL0, "clk_ppll0", NULL, 0, 1600000000, },
+ { HI3660_CLK_PPLL1, "clk_ppll1", NULL, 0, 1866000000, },
+ { HI3660_CLK_PPLL2, "clk_ppll2", NULL, 0, 2880000000, },
+ { HI3660_CLK_PPLL3, "clk_ppll3", NULL, 0, 1290000000, },
+ { HI3660_CLK_SCPLL, "clk_scpll", NULL, 0, 245760000, },
+ { HI3660_PCLK, "pclk", NULL, 0, 20000000, },
+ { HI3660_CLK_UART0_DBG, "clk_uart0_dbg", NULL, 0, 19200000, },
+ { HI3660_CLK_UART6, "clk_uart6", NULL, 0, 19200000, },
+ { HI3660_OSC32K, "osc32k", NULL, 0, 32764, },
+ { HI3660_OSC19M, "osc19m", NULL, 0, 19200000, },
+ { HI3660_CLK_480M, "clk_480m", NULL, 0, 480000000, },
+ { HI3660_CLK_INV, "clk_inv", NULL, 0, 10000000, },
+};
+
+/* crgctrl */
+static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
+ { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
+ { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
+ { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
+ { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
+ { HI3660_CLK_GATE_I2C2, "clk_gate_i2c2", "clk_i2c2_iomcu", 1, 4, 0, },
+ { HI3660_CLK_GATE_I2C6, "clk_gate_i2c6", "clk_i2c6_iomcu", 1, 4, 0, },
+ { HI3660_CLK_DIV_SYSBUS, "clk_div_sysbus", "clk_mux_sysbus", 1, 7, 0, },
+ { HI3660_CLK_DIV_320M, "clk_div_320m", "clk_320m_pll_gt", 1, 5, 0, },
+ { HI3660_CLK_DIV_A53, "clk_div_a53hpm", "clk_a53hpm_andgt", 1, 6, 0, },
+ { HI3660_CLK_GATE_SPI0, "clk_gate_spi0", "clk_ppll0", 1, 8, 0, },
+ { HI3660_CLK_GATE_SPI2, "clk_gate_spi2", "clk_ppll0", 1, 8, 0, },
+ { HI3660_PCIEPHY_REF, "clk_pciephy_ref", "clk_div_pciephy", 1, 1, 0, },
+ { HI3660_CLK_ABB_USB, "clk_abb_usb", "clk_gate_usb_tcxo_en", 1, 1, 0 },
+ { HI3660_VENC_VOLT_HOLD, "venc_volt_hold", "peri_volt_hold", 1, 1, 0, },
+ { HI3660_CLK_FAC_ISP_SNCLK, "clk_isp_snclk_fac", "clk_isp_snclk_angt",
+ 1, 10, 0, },
+};
+
+static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
+ { HI3660_PERI_VOLT_HOLD, "peri_volt_hold", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x0, 0, 0, },
+ { HI3660_HCLK_GATE_SDIO0, "hclk_gate_sdio0", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0, 21, 0, },
+ { HI3660_HCLK_GATE_SD, "hclk_gate_sd", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0, 30, 0, },
+ { HI3660_CLK_GATE_AOMM, "clk_gate_aomm", "clk_div_aomm",
+ CLK_SET_RATE_PARENT, 0x0, 31, 0, },
+ { HI3660_PCLK_GPIO0, "pclk_gpio0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 0, 0, },
+ { HI3660_PCLK_GPIO1, "pclk_gpio1", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 1, 0, },
+ { HI3660_PCLK_GPIO2, "pclk_gpio2", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 2, 0, },
+ { HI3660_PCLK_GPIO3, "pclk_gpio3", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 3, 0, },
+ { HI3660_PCLK_GPIO4, "pclk_gpio4", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 4, 0, },
+ { HI3660_PCLK_GPIO5, "pclk_gpio5", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 5, 0, },
+ { HI3660_PCLK_GPIO6, "pclk_gpio6", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 6, 0, },
+ { HI3660_PCLK_GPIO7, "pclk_gpio7", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 7, 0, },
+ { HI3660_PCLK_GPIO8, "pclk_gpio8", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 8, 0, },
+ { HI3660_PCLK_GPIO9, "pclk_gpio9", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 9, 0, },
+ { HI3660_PCLK_GPIO10, "pclk_gpio10", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 10, 0, },
+ { HI3660_PCLK_GPIO11, "pclk_gpio11", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 11, 0, },
+ { HI3660_PCLK_GPIO12, "pclk_gpio12", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 12, 0, },
+ { HI3660_PCLK_GPIO13, "pclk_gpio13", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 13, 0, },
+ { HI3660_PCLK_GPIO14, "pclk_gpio14", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 14, 0, },
+ { HI3660_PCLK_GPIO15, "pclk_gpio15", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 15, 0, },
+ { HI3660_PCLK_GPIO16, "pclk_gpio16", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 16, 0, },
+ { HI3660_PCLK_GPIO17, "pclk_gpio17", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 17, 0, },
+ { HI3660_PCLK_GPIO18, "pclk_gpio18", "clk_div_ioperi",
+ CLK_SET_RATE_PARENT, 0x10, 18, 0, },
+ { HI3660_PCLK_GPIO19, "pclk_gpio19", "clk_div_ioperi",
+ CLK_SET_RATE_PARENT, 0x10, 19, 0, },
+ { HI3660_PCLK_GPIO20, "pclk_gpio20", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 20, 0, },
+ { HI3660_PCLK_GPIO21, "pclk_gpio21", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 21, 0, },
+ { HI3660_CLK_GATE_SPI3, "clk_gate_spi3", "clk_div_ioperi",
+ CLK_SET_RATE_PARENT, 0x10, 30, 0, },
+ { HI3660_CLK_GATE_I2C7, "clk_gate_i2c7", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x10, 31, 0, },
+ { HI3660_CLK_GATE_I2C3, "clk_gate_i2c3", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 7, 0, },
+ { HI3660_CLK_GATE_SPI1, "clk_gate_spi1", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x20, 9, 0, },
+ { HI3660_CLK_GATE_UART1, "clk_gate_uart1", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 11, 0, },
+ { HI3660_CLK_GATE_UART2, "clk_gate_uart2", "clk_mux_uart1",
+ CLK_SET_RATE_PARENT, 0x20, 12, 0, },
+ { HI3660_CLK_GATE_UART4, "clk_gate_uart4", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 14, 0, },
+ { HI3660_CLK_GATE_UART5, "clk_gate_uart5", "clk_mux_uart1",
+ CLK_SET_RATE_PARENT, 0x20, 15, 0, },
+ { HI3660_CLK_GATE_I2C4, "clk_gate_i2c4", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 27, 0, },
+ { HI3660_CLK_GATE_DMAC, "clk_gate_dmac", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x30, 1, 0, },
+ { HI3660_CLK_GATE_VENC, "clk_gate_venc", "clk_div_venc",
+ CLK_SET_RATE_PARENT, 0x30, 10, 0, },
+ { HI3660_CLK_GATE_VDEC, "clk_gate_vdec", "clk_div_vdec",
+ CLK_SET_RATE_PARENT, 0x30, 11, 0, },
+ { HI3660_PCLK_GATE_DSS, "pclk_gate_dss", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x30, 12, 0, },
+ { HI3660_ACLK_GATE_DSS, "aclk_gate_dss", "clk_gate_vivobus",
+ CLK_SET_RATE_PARENT, 0x30, 13, 0, },
+ { HI3660_CLK_GATE_LDI1, "clk_gate_ldi1", "clk_div_ldi1",
+ CLK_SET_RATE_PARENT, 0x30, 14, 0, },
+ { HI3660_CLK_GATE_LDI0, "clk_gate_ldi0", "clk_div_ldi0",
+ CLK_SET_RATE_PARENT, 0x30, 15, 0, },
+ { HI3660_CLK_GATE_VIVOBUS, "clk_gate_vivobus", "clk_div_vivobus",
+ CLK_SET_RATE_PARENT, 0x30, 16, 0, },
+ { HI3660_CLK_GATE_EDC0, "clk_gate_edc0", "clk_div_edc0",
+ CLK_SET_RATE_PARENT, 0x30, 17, 0, },
+ { HI3660_CLK_GATE_TXDPHY0_CFG, "clk_gate_txdphy0_cfg", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x30, 28, 0, },
+ { HI3660_CLK_GATE_TXDPHY0_REF, "clk_gate_txdphy0_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x30, 29, 0, },
+ { HI3660_CLK_GATE_TXDPHY1_CFG, "clk_gate_txdphy1_cfg", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x30, 30, 0, },
+ { HI3660_CLK_GATE_TXDPHY1_REF, "clk_gate_txdphy1_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x30, 31, 0, },
+ { HI3660_ACLK_GATE_USB3OTG, "aclk_gate_usb3otg", "clk_div_mmc0bus",
+ CLK_SET_RATE_PARENT, 0x40, 1, 0, },
+ { HI3660_CLK_GATE_SPI4, "clk_gate_spi4", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x40, 4, 0, },
+ { HI3660_CLK_GATE_SD, "clk_gate_sd", "clk_mux_sd_sys",
+ CLK_SET_RATE_PARENT, 0x40, 17, 0, },
+ { HI3660_CLK_GATE_SDIO0, "clk_gate_sdio0", "clk_mux_sdio_sys",
+ CLK_SET_RATE_PARENT, 0x40, 19, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK0, "clk_gate_isp_snclk0",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 16, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK1, "clk_gate_isp_snclk1",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
+ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+ { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 28, 0, },
+ { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 29, 0, },
+ { HI3660_ACLK_GATE_PCIE, "aclk_gate_pcie", "clk_div_mmc1bus",
+ CLK_SET_RATE_PARENT, 0x420, 5, 0, },
+ { HI3660_PCLK_GATE_PCIE_SYS, "pclk_gate_pcie_sys", "clk_div_mmc1bus",
+ CLK_SET_RATE_PARENT, 0x420, 7, 0, },
+ { HI3660_CLK_GATE_PCIEAUX, "clk_gate_pcieaux", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x420, 8, 0, },
+ { HI3660_PCLK_GATE_PCIE_PHY, "pclk_gate_pcie_phy", "clk_div_mmc1bus",
+ CLK_SET_RATE_PARENT, 0x420, 9, 0, },
+};
+
+static const struct hisi_gate_clock hi3660_crgctrl_gate_clks[] = {
+ { HI3660_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0",
+ CLK_SET_RATE_PARENT, 0xf0, 6, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1",
+ CLK_SET_RATE_PARENT, 0xf0, 7, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0",
+ CLK_SET_RATE_PARENT, 0xf0, 8, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec",
+ CLK_SET_RATE_PARENT, 0xf0, 15, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc",
+ CLK_SET_RATE_PARENT, 0xf4, 0, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_GATE_UFSPHY_GT, "clk_gate_ufsphy_gt", "clk_div_ufsperi",
+ CLK_SET_RATE_PARENT, 0xf4, 1, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_MMC, "clk_andgt_mmc", "clk_mux_mmc_pll",
+ CLK_SET_RATE_PARENT, 0xf4, 2, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll",
+ CLK_SET_RATE_PARENT, 0xf4, 3, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm",
+ CLK_SET_RATE_PARENT, 0xf4, 7, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll",
+ CLK_SET_RATE_PARENT, 0xf4, 8, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xf4, 9, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_UART1, "clk_andgt_uart1", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xf4, 10, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xf4, 11, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xf4, 13, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_VIVOBUS_ANDGT, "clk_vivobus_andgt", "clk_mux_vivobus",
+ CLK_SET_RATE_PARENT, 0xf8, 1, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_AOMM_ANDGT, "clk_aomm_andgt", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0xf8, 3, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m",
+ CLK_SET_RATE_PARENT, 0xf8, 10, 0, },
+ { HI3660_CLK_ANGT_ISP_SNCLK, "clk_isp_snclk_angt", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, },
+ { HI3660_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus",
+ CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, },
+ { HI3660_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_GATE_UFSPHY_CFG, "clk_gate_ufsphy_cfg",
+ "clk_div_ufsphy_cfg", CLK_SET_RATE_PARENT, 0x420, 12, 0, },
+ { HI3660_CLK_GATE_UFSIO_REF, "clk_gate_ufsio_ref",
+ "clk_gate_ufs_tcxo_en", CLK_SET_RATE_PARENT, 0x420, 14, 0, },
+};
+
+static const char *const
+clk_mux_sysbus_p[] = {"clk_ppll1", "clk_ppll0"};
+static const char *const
+clk_mux_sdio_sys_p[] = {"clk_factor_mmc", "clk_div_sdio",};
+static const char *const
+clk_mux_sd_sys_p[] = {"clk_factor_mmc", "clk_div_sd",};
+static const char *const
+clk_mux_pll_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll2", "clk_ppll2",};
+static const char *const
+clk_mux_pll0123_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll2", "clk_ppll3",};
+static const char *const
+clk_mux_edc0_p[] = {"clk_inv", "clk_ppll0", "clk_ppll1", "clk_inv",
+ "clk_ppll2", "clk_inv", "clk_inv", "clk_inv",
+ "clk_ppll3", "clk_inv", "clk_inv", "clk_inv",
+ "clk_inv", "clk_inv", "clk_inv", "clk_inv",};
+static const char *const
+clk_mux_ldi0_p[] = {"clk_inv", "clk_ppll0", "clk_ppll2", "clk_inv",
+ "clk_ppll1", "clk_inv", "clk_inv", "clk_inv",
+ "clk_ppll3", "clk_inv", "clk_inv", "clk_inv",
+ "clk_inv", "clk_inv", "clk_inv", "clk_inv",};
+static const char *const
+clk_mux_uart0_p[] = {"clkin_sys", "clk_div_uart0",};
+static const char *const
+clk_mux_uart1_p[] = {"clkin_sys", "clk_div_uart1",};
+static const char *const
+clk_mux_uarth_p[] = {"clkin_sys", "clk_div_uarth",};
+static const char *const
+clk_mux_pll02p[] = {"clk_ppll0", "clk_ppll2",};
+static const char *const
+clk_mux_ioperi_p[] = {"clk_div_320m", "clk_div_a53hpm",};
+static const char *const
+clk_mux_spi_p[] = {"clkin_sys", "clk_div_spi",};
+static const char *const
+clk_mux_i2c_p[] = {"clkin_sys", "clk_div_i2c",};
+static const char *const
+clk_mux_venc_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll3", "clk_ppll3",};
+static const char *const
+clk_mux_isp_snclk_p[] = {"clkin_sys", "clk_isp_snclk_div"};
+
+static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
+ { HI3660_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sysbus_p,
+ ARRAY_SIZE(clk_mux_sysbus_p), CLK_SET_RATE_PARENT, 0xac, 0, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_UART0, "clk_mux_uart0", clk_mux_uart0_p,
+ ARRAY_SIZE(clk_mux_uart0_p), CLK_SET_RATE_PARENT, 0xac, 2, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_UART1, "clk_mux_uart1", clk_mux_uart1_p,
+ ARRAY_SIZE(clk_mux_uart1_p), CLK_SET_RATE_PARENT, 0xac, 3, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_UARTH, "clk_mux_uarth", clk_mux_uarth_p,
+ ARRAY_SIZE(clk_mux_uarth_p), CLK_SET_RATE_PARENT, 0xac, 4, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_SPI, "clk_mux_spi", clk_mux_spi_p,
+ ARRAY_SIZE(clk_mux_spi_p), CLK_SET_RATE_PARENT, 0xac, 8, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_I2C, "clk_mux_i2c", clk_mux_i2c_p,
+ ARRAY_SIZE(clk_mux_i2c_p), CLK_SET_RATE_PARENT, 0xac, 13, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_MMC_PLL, "clk_mux_mmc_pll", clk_mux_pll02p,
+ ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0xb4, 0, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_LDI1, "clk_mux_ldi1", clk_mux_ldi0_p,
+ ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT, 0xb4, 8, 4,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_LDI0, "clk_mux_ldi0", clk_mux_ldi0_p,
+ ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT, 0xb4, 12, 4,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_SD_PLL, "clk_mux_sd_pll", clk_mux_pll_p,
+ ARRAY_SIZE(clk_mux_pll_p), CLK_SET_RATE_PARENT, 0xb8, 4, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_SD_SYS, "clk_mux_sd_sys", clk_mux_sd_sys_p,
+ ARRAY_SIZE(clk_mux_sd_sys_p), CLK_SET_RATE_PARENT, 0xb8, 6, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_EDC0, "clk_mux_edc0", clk_mux_edc0_p,
+ ARRAY_SIZE(clk_mux_edc0_p), CLK_SET_RATE_PARENT, 0xbc, 6, 4,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_SDIO_SYS, "clk_mux_sdio_sys", clk_mux_sdio_sys_p,
+ ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT, 0xc0, 6, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_SDIO_PLL, "clk_mux_sdio_pll", clk_mux_pll_p,
+ ARRAY_SIZE(clk_mux_pll_p), CLK_SET_RATE_PARENT, 0xc0, 4, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_VENC, "clk_mux_venc", clk_mux_venc_p,
+ ARRAY_SIZE(clk_mux_venc_p), CLK_SET_RATE_PARENT, 0xc8, 11, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_VDEC, "clk_mux_vdec", clk_mux_pll0123_p,
+ ARRAY_SIZE(clk_mux_pll0123_p), CLK_SET_RATE_PARENT, 0xcc, 5, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_VIVOBUS, "clk_mux_vivobus", clk_mux_pll0123_p,
+ ARRAY_SIZE(clk_mux_pll0123_p), CLK_SET_RATE_PARENT, 0xd0, 12, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_A53HPM, "clk_mux_a53hpm", clk_mux_pll02p,
+ ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0xd4, 9, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_320M, "clk_mux_320m", clk_mux_pll02p,
+ ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0x100, 0, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_ISP_SNCLK, "clk_isp_snclk_mux", clk_mux_isp_snclk_p,
+ ARRAY_SIZE(clk_mux_isp_snclk_p), CLK_SET_RATE_PARENT, 0x108, 3, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_IOPERI, "clk_mux_ioperi", clk_mux_ioperi_p,
+ ARRAY_SIZE(clk_mux_ioperi_p), CLK_SET_RATE_PARENT, 0x108, 10, 1,
+ CLK_MUX_HIWORD_MASK, },
+};
+
+static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = {
+ { HI3660_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
+ CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_UART1, "clk_div_uart1", "clk_andgt_uart1",
+ CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
+ CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_MMC, "clk_div_mmc", "clk_andgt_mmc",
+ CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
+ CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
+ CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
+ CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
+ CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
+ CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
+ CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
+ CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
+ CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_vivobus_andgt",
+ CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_UFSPHY, "clk_div_ufsphy_cfg", "clk_gate_ufsphy_gt",
+ CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
+ CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_UFSPERI, "clk_div_ufsperi", "clk_gate_ufs_subsys",
+ CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_AOMM, "clk_div_aomm", "clk_aomm_andgt",
+ CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_ISP_SNCLK, "clk_isp_snclk_div", "clk_isp_snclk_fac",
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_mux_ioperi",
+ CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+/* clk_pmuctrl */
+/* PMU register offsets must be shifted left by 2 bits */
+static const struct hisi_gate_clock hi3660_pmu_gate_clks[] = {
+ { HI3660_GATE_ABB_192, "clk_gate_abb_192", "clkin_sys",
+ CLK_SET_RATE_PARENT, (0x10a << 2), 3, 0, },
+};
+
+/* clk_pctrl */
+static const struct hisi_gate_clock hi3660_pctrl_gate_clks[] = {
+ { HI3660_GATE_UFS_TCXO_EN, "clk_gate_ufs_tcxo_en",
+ "clk_gate_abb_192", CLK_SET_RATE_PARENT, 0x10, 0,
+ CLK_GATE_HIWORD_MASK, },
+ { HI3660_GATE_USB_TCXO_EN, "clk_gate_usb_tcxo_en", "clk_gate_abb_192",
+ CLK_SET_RATE_PARENT, 0x10, 1, CLK_GATE_HIWORD_MASK, },
+};
+
+/* clk_sctrl */
+static const struct hisi_gate_clock hi3660_sctrl_gate_sep_clks[] = {
+ { HI3660_PCLK_AO_GPIO0, "pclk_ao_gpio0", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 11, 0, },
+ { HI3660_PCLK_AO_GPIO1, "pclk_ao_gpio1", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 12, 0, },
+ { HI3660_PCLK_AO_GPIO2, "pclk_ao_gpio2", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 13, 0, },
+ { HI3660_PCLK_AO_GPIO3, "pclk_ao_gpio3", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 14, 0, },
+ { HI3660_PCLK_AO_GPIO4, "pclk_ao_gpio4", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 21, 0, },
+ { HI3660_PCLK_AO_GPIO5, "pclk_ao_gpio5", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 22, 0, },
+ { HI3660_PCLK_AO_GPIO6, "pclk_ao_gpio6", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 25, 0, },
+ { HI3660_PCLK_GATE_MMBUF, "pclk_gate_mmbuf", "pclk_div_mmbuf",
+ CLK_SET_RATE_PARENT, 0x170, 23, 0, },
+ { HI3660_CLK_GATE_DSS_AXI_MM, "clk_gate_dss_axi_mm", "aclk_mux_mmbuf",
+ CLK_SET_RATE_PARENT, 0x170, 24, 0, },
+};
+
+static const struct hisi_gate_clock hi3660_sctrl_gate_clks[] = {
+ { HI3660_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "clk_sw_mmbuf",
+ CLK_SET_RATE_PARENT, 0x258, 7, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_FLL_MMBUF_ANDGT, "clk_fll_mmbuf_andgt", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_SYS_MMBUF_ANDGT, "clk_sys_mmbuf_andgt", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_GATE_PCIEPHY_GT, "clk_gate_pciephy_gt", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+static const char *const
+aclk_mux_mmbuf_p[] = {"aclk_div_mmbuf", "clk_gate_aomm",};
+static const char *const
+clk_sw_mmbuf_p[] = {"clk_sys_mmbuf_andgt", "clk_fll_mmbuf_andgt",
+ "aclk_mux_mmbuf", "aclk_mux_mmbuf"};
+
+static const struct hisi_mux_clock hi3660_sctrl_mux_clks[] = {
+ { HI3660_ACLK_MUX_MMBUF, "aclk_mux_mmbuf", aclk_mux_mmbuf_p,
+ ARRAY_SIZE(aclk_mux_mmbuf_p), CLK_SET_RATE_PARENT, 0x250, 12, 1,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_SW_MMBUF, "clk_sw_mmbuf", clk_sw_mmbuf_p,
+ ARRAY_SIZE(clk_sw_mmbuf_p), CLK_SET_RATE_PARENT, 0x258, 8, 2,
+ CLK_MUX_HIWORD_MASK, },
+};
+
+static const struct hisi_divider_clock hi3660_sctrl_divider_clks[] = {
+ { HI3660_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
+ CLK_SET_RATE_PARENT, 0x258, 10, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
+ CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_PCIEPHY, "clk_div_pciephy", "clk_gate_pciephy_gt",
+ CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+/* clk_iomcu */
+static const struct hisi_gate_clock hi3660_iomcu_gate_sep_clks[] = {
+ { HI3660_CLK_I2C0_IOMCU, "clk_i2c0_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 3, 0, },
+ { HI3660_CLK_I2C1_IOMCU, "clk_i2c1_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 4, 0, },
+ { HI3660_CLK_I2C2_IOMCU, "clk_i2c2_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 5, 0, },
+ { HI3660_CLK_I2C6_IOMCU, "clk_i2c6_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 27, 0, },
+ { HI3660_CLK_IOMCU_PERI0, "iomcu_peri0", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x90, 0, 0, },
+};
+
+static struct hisi_clock_data *clk_crgctrl_data;
+
+static void hi3660_clk_iomcu_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3660_iomcu_gate_sep_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi3660_iomcu_gate_sep_clks,
+ ARRAY_SIZE(hi3660_iomcu_gate_sep_clks),
+ clk_data);
+}
+
+static void hi3660_clk_pmuctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3660_pmu_gate_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate(hi3660_pmu_gate_clks,
+ ARRAY_SIZE(hi3660_pmu_gate_clks), clk_data);
+}
+
+static void hi3660_clk_pctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3660_pctrl_gate_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+ hisi_clk_register_gate(hi3660_pctrl_gate_clks,
+ ARRAY_SIZE(hi3660_pctrl_gate_clks), clk_data);
+}
+
+static void hi3660_clk_sctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3660_sctrl_gate_clks) +
+ ARRAY_SIZE(hi3660_sctrl_gate_sep_clks) +
+ ARRAY_SIZE(hi3660_sctrl_mux_clks) +
+ ARRAY_SIZE(hi3660_sctrl_divider_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+ hisi_clk_register_gate(hi3660_sctrl_gate_clks,
+ ARRAY_SIZE(hi3660_sctrl_gate_clks), clk_data);
+ hisi_clk_register_gate_sep(hi3660_sctrl_gate_sep_clks,
+ ARRAY_SIZE(hi3660_sctrl_gate_sep_clks),
+ clk_data);
+ hisi_clk_register_mux(hi3660_sctrl_mux_clks,
+ ARRAY_SIZE(hi3660_sctrl_mux_clks), clk_data);
+ hisi_clk_register_divider(hi3660_sctrl_divider_clks,
+ ARRAY_SIZE(hi3660_sctrl_divider_clks),
+ clk_data);
+}
+
+static void hi3660_clk_crgctrl_early_init(struct device_node *np)
+{
+ int nr = ARRAY_SIZE(hi3660_fixed_rate_clks) +
+ ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks) +
+ ARRAY_SIZE(hi3660_crgctrl_gate_clks) +
+ ARRAY_SIZE(hi3660_crgctrl_mux_clks) +
+ ARRAY_SIZE(hi3660_crg_fixed_factor_clks) +
+ ARRAY_SIZE(hi3660_crgctrl_divider_clks);
+ int i;
+
+ clk_crgctrl_data = hisi_clk_init(np, nr);
+ if (!clk_crgctrl_data)
+ return;
+
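+	/*
+	 * Pre-mark every clock slot as -EPROBE_DEFER: only the fixed-rate
+	 * clocks are registered at early init time, so consumers of the
+	 * remaining clocks are deferred until hi3660_clk_crgctrl_init()
+	 * has registered them from the platform driver probe.
+	 */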
+ for (i = 0; i < nr; i++)
+ clk_crgctrl_data->clk_data.clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ hisi_clk_register_fixed_rate(hi3660_fixed_rate_clks,
+ ARRAY_SIZE(hi3660_fixed_rate_clks),
+ clk_crgctrl_data);
+}
+CLK_OF_DECLARE_DRIVER(hi3660_clk_crgctrl, "hisilicon,hi3660-crgctrl",
+ hi3660_clk_crgctrl_early_init);
+
+static void hi3660_clk_crgctrl_init(struct device_node *np)
+{
+ struct clk **clks;
+ int i;
+
+ if (!clk_crgctrl_data)
+ hi3660_clk_crgctrl_early_init(np);
+
+ /* clk_crgctrl_data initialization failed */
+ if (!clk_crgctrl_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi3660_crgctrl_gate_sep_clks,
+ ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks),
+ clk_crgctrl_data);
+ hisi_clk_register_gate(hi3660_crgctrl_gate_clks,
+ ARRAY_SIZE(hi3660_crgctrl_gate_clks),
+ clk_crgctrl_data);
+ hisi_clk_register_mux(hi3660_crgctrl_mux_clks,
+ ARRAY_SIZE(hi3660_crgctrl_mux_clks),
+ clk_crgctrl_data);
+ hisi_clk_register_fixed_factor(hi3660_crg_fixed_factor_clks,
+ ARRAY_SIZE(hi3660_crg_fixed_factor_clks),
+ clk_crgctrl_data);
+ hisi_clk_register_divider(hi3660_crgctrl_divider_clks,
+ ARRAY_SIZE(hi3660_crgctrl_divider_clks),
+ clk_crgctrl_data);
+
+ clks = clk_crgctrl_data->clk_data.clks;
+ for (i = 0; i < clk_crgctrl_data->clk_data.clk_num; i++) {
+ if (IS_ERR(clks[i]) && PTR_ERR(clks[i]) != -EPROBE_DEFER)
+ pr_err("Failed to register crgctrl clock[%d] err=%ld\n",
+ i, PTR_ERR(clks[i]));
+ }
+}
+
+static const struct of_device_id hi3660_clk_match_table[] = {
+ { .compatible = "hisilicon,hi3660-crgctrl",
+ .data = hi3660_clk_crgctrl_init },
+ { .compatible = "hisilicon,hi3660-pctrl",
+ .data = hi3660_clk_pctrl_init },
+ { .compatible = "hisilicon,hi3660-pmuctrl",
+ .data = hi3660_clk_pmuctrl_init },
+ { .compatible = "hisilicon,hi3660-sctrl",
+ .data = hi3660_clk_sctrl_init },
+ { .compatible = "hisilicon,hi3660-iomcu",
+ .data = hi3660_clk_iomcu_init },
+ { }
+};
+
+static int hi3660_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ void (*init_func)(struct device_node *np);
+
+ init_func = of_device_get_match_data(dev);
+ if (!init_func)
+ return -ENODEV;
+
+ init_func(np);
+
+ return 0;
+}
+
+static struct platform_driver hi3660_clk_driver = {
+ .probe = hi3660_clk_probe,
+ .driver = {
+ .name = "hi3660-clk",
+ .of_match_table = hi3660_clk_match_table,
+ },
+};
+
+static int __init hi3660_clk_init(void)
+{
+ return platform_driver_register(&hi3660_clk_driver);
+}
+core_initcall(hi3660_clk_init);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 2bfaf22e6ffc..7cc335e7ef66 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -55,9 +55,9 @@ static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
};
static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
- { HI6220_WDT0_PCLK, "wdt0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
- { HI6220_WDT1_PCLK, "wdt1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
- { HI6220_WDT2_PCLK, "wdt2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
+ { HI6220_WDT0_PCLK, "wdt0_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
+ { HI6220_WDT1_PCLK, "wdt1_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
+ { HI6220_WDT2_PCLK, "wdt2_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
{ HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, },
{ HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, },
{ HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, },
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index a47812f56a17..7908bc3c9ec7 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -120,6 +120,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
sclk->bit_idx = bit_idx;
sclk->flags = clk_gate_flags;
sclk->hw.init = &init;
+ sclk->lock = lock;
clk = clk_register(dev, &sclk->hw);
if (IS_ERR(clk))
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 71267626456b..2c83c2e74276 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -27,6 +27,7 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-h3", },
{ .compatible = "hisilicon,hi6220", },
+ { .compatible = "hisilicon,hi3660", },
{ .compatible = "fsl,imx27", },
{ .compatible = "fsl,imx51", },
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0be73e..fd6c848d4763 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -37,6 +37,18 @@ config ARM_HIGHBANK_CPUIDLE
help
Select this to enable cpuidle on Calxeda processors.
+config ARM_HISI_CPUIDLE
+ bool "Support for Hisilicon big.LITTLE processors"
+ depends on ARCH_HISI
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_STATES
+ help
+	  Select this option to enable the CPU idle driver for big.LITTLE
+	  based ARM systems. The driver manages CPU coordination through MCPM
+	  and defines different C-states for the little and big cores through
+	  the multiple CPU idle drivers infrastructure.
+
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
depends on MACH_KIRKWOOD && !ARM64
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 3ba81b1dffad..c8ed48fbd19e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
+obj-$(CONFIG_ARM_HISI_CPUIDLE) += cpuidle-hisi.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-hisi.c b/drivers/cpuidle/cpuidle-hisi.c
new file mode 100644
index 000000000000..003635639620
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-hisi.c
@@ -0,0 +1,292 @@
+/*
+ * ARM64 generic CPU idle driver.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "CPUidle arm64: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <asm/cpuidle.h>
+#include <asm/suspend.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include "dt_idle_states.h"
+#ifdef CONFIG_HISI_CORESIGHT_TRACE
+#include <linux/coresight.h>
+#endif
+
+enum {
+ LITTLE_CLUSTER_ID = 0,
+ BIG_CLUSTER_ID,
+ MAX_CLUSTER_ID,
+};
+
+/*
+ * hisi_enter_idle_state - Programs CPU to enter the specified state
+ *
+ * dev: cpuidle device
+ * drv: cpuidle driver
+ * idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+
+extern int real_enable_cpuidle;
+
+static int hisi_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ int ret;
+
+ if (need_resched()) {
+ return idx;
+ }
+
+ if (!idx) {
+ cpu_do_idle();
+ return idx;
+ }
+
+ ret = cpu_pm_enter();
+ if (!ret) {
+#ifdef CONFIG_ARCH_HISI
+ local_fiq_disable();
+#endif
+
+ /*
+ * Pass idle state index to cpu_suspend which in turn will
+ * call the CPU ops suspend protocol with idle index as a
+ * parameter.
+ */
+ ret = arm_cpuidle_suspend(idx);
+
+#ifdef CONFIG_HISI_CORESIGHT_TRACE
+		/* Restore ETM registers */
+ _etm4_cpuilde_restore();
+#endif
+#ifdef CONFIG_ARCH_HISI
+ local_fiq_enable();
+#endif
+ cpu_pm_exit();
+
+ }
+
+ return ret ? -1 : idx;
+}
+
+static struct cpuidle_driver hisi_little_cluster_idle_driver = {
+ .name = "hisi_little_cluster_idle",
+ .owner = THIS_MODULE,
+ /*
+ * State at index 0 is standby wfi and considered standard
+ * on all ARM platforms. If in some platforms simple wfi
+ * can't be used as "state 0", DT bindings must be implemented
+ * to work around this issue and allow installing a special
+ * handler for idle state index 0.
+ */
+ .states[0] = {
+ .enter = hisi_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,14)
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+#endif
+ .name = "WFI",
+ .desc = "ARM64 WFI",
+ }
+};
+static struct cpuidle_driver hisi_big_cluster_idle_driver = {
+ .name = "hisi_big_cluster_idle",
+ .owner = THIS_MODULE,
+ /*
+ * State at index 0 is standby wfi and considered standard
+ * on all ARM platforms. If in some platforms simple wfi
+ * can't be used as "state 0", DT bindings must be implemented
+ * to work around this issue and allow installing a special
+ * handler for idle state index 0.
+ */
+ .states[0] = {
+ .enter = hisi_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,14)
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+#endif
+ .name = "WFI",
+ .desc = "ARM64 WFI",
+ }
+};
+
+static const struct of_device_id arm64_idle_state_match[] __initconst = {
+ { .compatible = "arm,idle-state",
+ .data = hisi_enter_idle_state },
+ { },
+};
+
+static int __init hisi_idle_drv_cpumask_init(struct cpuidle_driver *drv, int cluster_id)
+{
+ struct cpumask *cpumask;
+ int cpu;
+
+ cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!cpumask)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == cluster_id)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ drv->cpumask = cpumask;
+
+ return 0;
+}
+
+static void __init hisi_idle_drv_cpumask_uninit(struct cpuidle_driver *drv)
+{
+ kfree(drv->cpumask);
+}
+
+static int __init hisi_idle_drv_init(struct cpuidle_driver *drv)
+{
+ int cpu, ret;
+
+ /*
+ * Initialize idle states data, starting at index 1.
+	 * This driver is DT only; if no DT idle states are detected (ret == 0),
+	 * let the driver initialization fail accordingly, since there is no
+ * reason to initialize the idle driver if only wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
+ if (ret <= 0) {
+ if (ret)
+ pr_err("failed to initialize idle states\n");
+ return ret ? : -ENODEV;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
+ for_each_possible_cpu(cpu) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,14)
+ ret = arm_cpuidle_init(cpu);
+#else
+ ret = cpu_init_idle(cpu);
+#endif
+ if (ret) {
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ return ret;
+ }
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret) {
+ pr_err("failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init hisi_multidrv_idle_init(struct cpuidle_driver *drv, int cluster_id)
+{
+ int ret;
+ if (cluster_id >= MAX_CLUSTER_ID) {
+ pr_err("cluster id is out of range.\n");
+ return -ENODEV;
+ }
+
+ ret = hisi_idle_drv_cpumask_init(drv, cluster_id);
+ if (ret) {
+ pr_err("fail to init idle driver!\n");
+ return ret;
+ }
+
+ ret = hisi_idle_drv_init(drv);
+ if (ret) {
+ hisi_idle_drv_cpumask_uninit(drv);
+ pr_err("fail to register cluster%d cpuidle drv.\n", cluster_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+
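+/*
+ * Pause cpuidle across CPU hotplug transitions, presumably so that
+ * cluster-level idle states are not entered while a sibling CPU is in the
+ * middle of coming up or going down; resume once the transition completes
+ * or is cancelled, and kick_all_cpus_sync() forces every CPU to re-evaluate
+ * its idle state.
+ */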
+static int cpuidle_decoup_hotplug_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+	if (action & CPU_TASKS_FROZEN)
+ return NOTIFY_OK;
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ cpuidle_pause();
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_UP_CANCELED:
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ cpuidle_resume();
+ kick_all_cpus_sync();
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_decoup_hotplug_notifier = {
+ .notifier_call = cpuidle_decoup_hotplug_notify,
+};
+
+/*
+ * hisi_idle_init
+ *
+ * Registers the hisi multi cpuidle driver with the cpuidle
+ * framework. It relies on core code to parse the idle states
+ * and initialize them using driver data structures accordingly.
+ */
+static int __init hisi_idle_init(void)
+{
+ int ret;
+
+ ret = hisi_multidrv_idle_init(&hisi_little_cluster_idle_driver, LITTLE_CLUSTER_ID);
+ if (ret) {
+ pr_err("fail to register little cluster cpuidle drv.\n");
+ return ret;
+ }
+
+ ret = hisi_multidrv_idle_init(&hisi_big_cluster_idle_driver, BIG_CLUSTER_ID);
+ if (ret) {
+ pr_err("fail to register big cluster cpuidle drv.\n");
+ return ret;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,14)
+ ret = register_cpu_notifier(&cpuidle_decoup_hotplug_notifier);
+ if (ret) {
+ pr_err("fail to register cpuidle_coupled_cpu_notifier.\n");
+ return ret;
+ }
+#endif
+ return 0;
+}
+device_initcall(hisi_idle_init);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 41254e702f1e..14c55e32df76 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -112,6 +112,16 @@ config ARM_RK3399_DMC_DEVFREQ
It sets the frequency for the memory controller and reads the usage counts
from hardware.
+config HISI_DDR_DEVFREQ
+ tristate "HISI DDR DEVFREQ Driver"
+ depends on ARCH_HISI
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_OPP
+ help
+	  This adds the DEVFREQ driver for the HiSilicon family of SoCs.
+	  It adjusts the DDR frequency according to PM QoS memory bandwidth
+	  requests (see the usage sketch after this hunk).
+
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ
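The HISI_DDR_DEVFREQ driver above reacts to PM_QOS_MEMORY_BANDWIDTH votes. A
minimal, hypothetical client sketch (the names and the 2000 MB/s figure are
illustrative and not part of this patch) that would drive it looks like this:

    #include <linux/pm_qos.h>

    /* Hypothetical consumer sketch; not from this patch. */
    static struct pm_qos_request example_ddr_bw_req;

    static void example_start_streaming(void)
    {
            /* Vote for roughly 2000 MB/s of DDR bandwidth while streaming. */
            pm_qos_add_request(&example_ddr_bw_req,
                               PM_QOS_MEMORY_BANDWIDTH, 2000);
    }

    static void example_stop_streaming(void)
    {
            /* Drop the vote so the DDR frequency can scale back down. */
            pm_qos_remove_request(&example_ddr_bw_req);
    }

The driver's PM QoS notifier then calls update_devfreq(), and its
get_dev_status() reports the aggregate request as busy_time for the
simple_ondemand governor.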
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index fbff40a508a4..2432a48ead05 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
+obj-$(CONFIG_HISI_DDR_DEVFREQ) += hisi-ddr-devfreq.o
# DEVFREQ Event Drivers
obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/hisi-ddr-devfreq.c b/drivers/devfreq/hisi-ddr-devfreq.c
new file mode 100644
index 000000000000..550407e1b5ad
--- /dev/null
+++ b/drivers/devfreq/hisi-ddr-devfreq.c
@@ -0,0 +1,329 @@
+/*
+ * linux/drivers/devfreq/hisi-ddr-devfreq.c
+ *
+ * Copyright (c) 2017 Hisilicon Technologies CO., Ltd.
+ *
+ * Author: Tao Wang <kevin.wangtao@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/devfreq.h>
+#include <linux/clk.h>
+#include <linux/pm_qos.h>
+#include "governor.h"
+
+struct ddr_devfreq_pdata {
+ int pm_qos_constraint;
+ unsigned int bytes_per_cycle;
+ unsigned int polling_ms;
+ char *governor;
+ void *governor_data;
+};
+
+struct ddr_devfreq_device {
+ struct devfreq *devfreq;
+ struct clk *get_current;
+ struct clk *req_dn_thres;
+ struct clk *req_up_thres;
+ unsigned long dn_thres_freq;
+ unsigned long up_thres_freq;
+ unsigned long max_freq;
+ struct notifier_block nb;
+ const struct ddr_devfreq_pdata *pdata;
+};
+
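+/*
+ * Note: the devfreq target callback does not program the DDR controller
+ * directly. It conveys the recommended OPP as a "down threshold" and the
+ * devfreq max_freq cap as an "up threshold" by setting the rates of the
+ * clocks obtained in probe (req-dn-thres / req-up-thres), which presumably
+ * forward the request to the platform's DDR frequency control logic.
+ */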
+static int ddr_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct ddr_devfreq_device *ddev = platform_get_drvdata(pdev);
+ struct dev_pm_opp *opp = NULL;
+ struct dev_pm_opp *max_opp = NULL;
+ unsigned long max_freq = ddev->devfreq->max_freq;
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ max_opp = devfreq_recommended_opp(dev, &max_freq,
+ DEVFREQ_FLAG_LEAST_UPPER_BOUND);
+ rcu_read_unlock();
+ if (IS_ERR(opp) || IS_ERR(max_opp)) {
+ dev_err(dev, "Failed to get Operating Point\n");
+ return IS_ERR(opp) ? PTR_ERR(opp) : PTR_ERR(max_opp);
+ }
+
+ if (ddev->devfreq->max_freq &&
+ max_freq != ddev->up_thres_freq &&
+ ddev->req_up_thres) {
+ dev_dbg(dev, "set up threshold:%lu\n", max_freq);
+ (void)clk_set_rate(ddev->req_up_thres, max_freq);
+ ddev->up_thres_freq = max_freq;
+ }
+
+ if (ddev->dn_thres_freq != *freq) {
+		/* update the DDR frequency down threshold */
+ dev_dbg(dev, "set down threshold:%lu\n", *freq);
+ (void)clk_set_rate(ddev->req_dn_thres, *freq);
+ ddev->dn_thres_freq = *freq;
+ }
+
+ return 0;
+}
+
+static int ddr_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct ddr_devfreq_device *ddev = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_PM
+ if (ddev->pdata->pm_qos_constraint) {
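+		/*
+		 * busy_time is the aggregate PM QoS memory bandwidth request
+		 * (typically expressed in MB/s), and total_time is the peak
+		 * bandwidth available at max_freq, (max_freq * bytes_per_cycle)
+		 * >> 20; e.g. roughly 1866000000 * 16 / 2^20 ~= 28473 MB/s for
+		 * a 1866 MHz interface moving 16 bytes per cycle. The
+		 * simple_ondemand governor then scales frequency from the
+		 * busy/total ratio.
+		 */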
+ stat->busy_time =
+ pm_qos_request(ddev->pdata->pm_qos_constraint);
+ stat->total_time =
+ (ddev->max_freq * ddev->pdata->bytes_per_cycle) >> 20;
+ stat->current_frequency = ddev->max_freq;
+ dev_dbg(&pdev->dev, "ddr bandwdith request: %lu / %lu\n",
+ stat->busy_time, stat->total_time);
+ }
+#endif
+
+ return 0;
+}
+
+static int ddr_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct ddr_devfreq_device *ddev = platform_get_drvdata(pdev);
+
+ if (ddev->get_current)
+ *freq = clk_get_rate(ddev->get_current);
+ else
+ *freq = ddev->dn_thres_freq;
+
+ return 0;
+}
+
+static struct devfreq_dev_profile ddr_devfreq_profile = {
+ .polling_ms = 0,
+ .target = ddr_devfreq_target,
+ .get_dev_status = ddr_devfreq_get_dev_status,
+ .get_cur_freq = ddr_devfreq_get_cur_freq,
+};
+
+static struct devfreq_simple_ondemand_data ddr_ondemand = {
+ .upthreshold = 60,
+ .downdifferential = 1,
+};
+
+static int devfreq_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *v)
+{
+ struct ddr_devfreq_device *ddev = container_of(nb,
+ struct ddr_devfreq_device, nb);
+
+ mutex_lock(&ddev->devfreq->lock);
+ update_devfreq(ddev->devfreq);
+ mutex_unlock(&ddev->devfreq->lock);
+
+ return NOTIFY_OK;
+}
+
+static int hisi_devfreq_set_freq_table(struct device *dev,
+ struct devfreq_dev_profile *profile)
+{
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int i, count, ret = 0;
+
+ /* Initialize the freq_table from OPP table */
+ count = dev_pm_opp_get_opp_count(dev);
+ if (count <= 0)
+ return -ENOMEM;
+
+ profile->max_state = count;
+ profile->freq_table = devm_kcalloc(dev,
+ profile->max_state,
+ sizeof(*profile->freq_table),
+ GFP_KERNEL);
+ if (!profile->freq_table) {
+ profile->max_state = 0;
+ return -ENOMEM;
+ }
+
+ rcu_read_lock();
+ for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ profile->max_state = 0;
+ ret = -ENOMEM;
+ break;
+ }
+ profile->freq_table[i] = freq;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct ddr_devfreq_pdata hi3660_pdata = {
+ .pm_qos_constraint = PM_QOS_MEMORY_BANDWIDTH,
+ .bytes_per_cycle = 16,
+ .polling_ms = 0,
+ .governor = "simple_ondemand",
+ .governor_data = &ddr_ondemand,
+};
+
+static const struct of_device_id ddr_devfreq_of_match[] = {
+ {
+ .compatible = "hisilicon,hi3660-ddrfreq",
+ .data = &hi3660_pdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ddr_devfreq_of_match);
+
+static int ddr_devfreq_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct ddr_devfreq_device *ddev = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int max_state;
+ int ret = 0;
+
+ ddev = devm_kzalloc(dev, sizeof(struct ddr_devfreq_device),
+ GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ match = of_match_device(ddr_devfreq_of_match, dev);
+ ddev->pdata = match->data;
+ platform_set_drvdata(pdev, ddev);
+
+ ddev->req_dn_thres = of_clk_get(np, 0);
+ if (IS_ERR(ddev->req_dn_thres)) {
+ dev_err(dev, "Failed to get req-dn-thres-clk\n");
+ ret = -ENODEV;
+ goto no_clk;
+ }
+
+ ddev->get_current = of_clk_get(np, 1);
+ if (IS_ERR(ddev->get_current)) {
+ dev_err(dev, "Failed to get get-current-clk\n");
+ ddev->get_current = NULL;
+ }
+
+ ddev->req_up_thres = of_clk_get(np, 2);
+ if (IS_ERR(ddev->req_up_thres)) {
+ dev_err(dev, "Failed to get req-up-thres-clk\n");
+ ddev->req_up_thres = NULL;
+ }
+
+ if (dev_pm_opp_of_add_table(dev) ||
+ hisi_devfreq_set_freq_table(dev, &ddr_devfreq_profile)) {
+ dev_err(dev, "Failed to init freq table\n");
+ ret = -ENODEV;
+ goto no_devfreq;
+ }
+
+ ddr_devfreq_profile.polling_ms = ddev->pdata->polling_ms;
+ max_state = ddr_devfreq_profile.max_state;
+	ddev->max_freq = ddr_devfreq_profile.freq_table[max_state - 1];
+ ddev->devfreq = devm_devfreq_add_device(dev,
+ &ddr_devfreq_profile,
+ ddev->pdata->governor,
+ ddev->pdata->governor_data);
+ if (IS_ERR_OR_NULL(ddev->devfreq)) {
+ dev_err(dev, "Failed to init ddr devfreq\n");
+ ret = -ENODEV;
+ goto no_devfreq;
+ }
+
+#ifdef CONFIG_PM
+ if (ddev->pdata->pm_qos_constraint) {
+ ddev->nb.notifier_call = devfreq_pm_qos_notifier;
+ ret = pm_qos_add_notifier(ddev->pdata->pm_qos_constraint,
+ &ddev->nb);
+ if (ret)
+ goto no_notifier;
+ }
+#endif
+
+ dev_info(dev, "init success\n");
+ return ret;
+
+no_notifier:
+	devm_devfreq_remove_device(dev, ddev->devfreq);
+no_devfreq:
+ if (ddev->req_up_thres)
+ clk_put(ddev->req_up_thres);
+
+ if (ddev->get_current)
+ clk_put(ddev->get_current);
+
+ clk_put(ddev->req_dn_thres);
+no_clk:
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int ddr_devfreq_remove(struct platform_device *pdev)
+{
+ struct ddr_devfreq_device *ddev;
+
+ ddev = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_PM
+ if (ddev->pdata->pm_qos_constraint)
+ pm_qos_remove_notifier(ddev->pdata->pm_qos_constraint,
+ &ddev->nb);
+#endif
+
+ if (ddev->req_up_thres)
+ clk_put(ddev->req_up_thres);
+
+ if (ddev->get_current)
+ clk_put(ddev->get_current);
+
+ clk_put(ddev->req_dn_thres);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ddr_devfreq_driver = {
+ .probe = ddr_devfreq_probe,
+ .remove = ddr_devfreq_remove,
+ .driver = {
+ .name = "hisi_ddr_devfreq",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ddr_devfreq_of_match),
+ },
+};
+
+module_platform_driver(ddr_devfreq_driver);
+
+MODULE_AUTHOR("Tao Wang <kevin.wangtao@hisilicon.com>");
+MODULE_DESCRIPTION("hisi ddr devfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 141aefbe37ec..6f4ec1a589d1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -285,6 +285,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config HISI_ASP_DMA
+ tristate "Hisilicon Kirin ASP DMA support"
+ depends on ARCH_HISI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine for Hisilicon Kirin platform
+ devices.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e4dc9cac7ee8..f6b7636ba5d7 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_HISI_ASP_DMA) += hisi_asp_dma.o
obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/hisi_asp_dma.c b/drivers/dma/hisi_asp_dma.c
new file mode 100644
index 000000000000..08ecaa298d8b
--- /dev/null
+++ b/drivers/dma/hisi_asp_dma.c
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (c) 2013 - 2015 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+#include <linux/regulator/consumer.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "hisi-asp-dma"
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1ffc
+#define DMA_CYCLIC_MAX_PERIOD 0x1000
+#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
+
+#define INT_STAT 0x00
+#define INT_TC1 0x04
+#define INT_TC2 0x08
+#define INT_ERR1 0x0c
+#define INT_ERR2 0x10
+#define INT_TC1_MASK 0x18
+#define INT_TC2_MASK 0x1c
+#define INT_ERR1_MASK 0x20
+#define INT_ERR2_MASK 0x24
+#define INT_TC1_RAW 0x600
+#define INT_TC2_RAW 0x608
+#define INT_ERR1_RAW 0x610
+#define INT_ERR2_RAW 0x618
+#define CH_PRI 0x688
+#define CH_STAT 0x690
+#define CX_CUR_CNT 0x704
+#define CX_LLI 0x800
+#define CX_CNT1 0x80c
+#define CX_CNT0 0x810
+#define CX_SRC 0x814
+#define CX_DST 0x818
+#define CX_CFG 0x81c
+#define AXI_CFG 0x820
+#define AXI_CFG_DEFAULT 0x201201
+
+#define CX_LLI_CHAIN_EN 0x2
+#define CX_CFG_EN 0x1
+#define CX_CFG_NODEIRQ BIT(1)
+#define CX_CFG_MEM2PER (0x1 << 2)
+#define CX_CFG_PER2MEM (0x2 << 2)
+#define CX_CFG_SRCINCR (0x1 << 31)
+#define CX_CFG_DSTINCR (0x1 << 30)
+
+struct hisi_asp_desc_hw {
+ u32 lli;
+ u32 reserved[3];
+ u32 count;
+ u32 saddr;
+ u32 daddr;
+ u32 config;
+} __aligned(32);
+
+struct hisi_asp_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct hisi_asp_desc_hw *desc_hw;
+};
+
+struct hisi_asp_dma_phy;
+
+struct hisi_asp_dma_chan {
+ u32 ccfg;
+ struct virt_dma_chan vc;
+ struct hisi_asp_dma_phy *phy;
+ struct list_head node;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+ bool cyclic;
+};
+
+struct hisi_asp_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct hisi_asp_dma_chan *vchan;
+ struct hisi_asp_dma_desc_sw *ds_run;
+ struct hisi_asp_dma_desc_sw *ds_done;
+};
+
+struct hisi_asp_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ struct list_head chan_pending;
+ struct hisi_asp_dma_phy *phy;
+ struct hisi_asp_dma_chan *chans;
+ struct clk *clk;
+ struct dma_pool *pool;
+ u32 dma_channels;
+ u32 dma_requests;
+ struct regulator *asp_ip;
+};
+
+#define to_hisi_asp_dma(dmadev) container_of(dmadev, struct hisi_asp_dma_dev, slave)
+
+static struct hisi_asp_dma_chan *to_hisi_asp_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct hisi_asp_dma_chan, vc.chan);
+}
+
+static void hisi_asp_dma_pause_dma(struct hisi_asp_dma_phy *phy, bool on)
+{
+ u32 val = 0;
+
+ if (on) {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val |= CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ } else {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val &= ~CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ }
+}
+
+static void hisi_asp_dma_terminate_chan(struct hisi_asp_dma_phy *phy, struct hisi_asp_dma_dev *d)
+{
+ u32 val = 0;
+
+ hisi_asp_dma_pause_dma(phy, false);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_TC2_RAW);
+ writel_relaxed(val, d->base + INT_ERR1_RAW);
+ writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
+static void hisi_asp_dma_set_desc(struct hisi_asp_dma_phy *phy, struct hisi_asp_desc_hw *hw)
+{
+ writel_relaxed(hw->lli, phy->base + CX_LLI);
+ writel_relaxed(hw->count, phy->base + CX_CNT0);
+ writel_relaxed(hw->saddr, phy->base + CX_SRC);
+ writel_relaxed(hw->daddr, phy->base + CX_DST);
+ writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+ writel_relaxed(hw->config, phy->base + CX_CFG);
+ wmb();
+ pr_debug("%s: desc %p: ch idx = %d, lli: 0x%x, count: 0x%x, saddr: 0x%x, daddr 0x%x, cfg: 0x%x\n", __func__, (void *)hw,
+ phy->idx, hw->lli, hw->count, hw->saddr, hw->daddr, hw->config);
+}
+
+static u32 hisi_asp_dma_get_curr_cnt(struct hisi_asp_dma_dev *d, struct hisi_asp_dma_phy *phy)
+{
+ u32 cnt = 0;
+
+ cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+ cnt &= 0xffff;
+ return cnt;
+}
+
+static u32 hisi_asp_dma_get_curr_lli(struct hisi_asp_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 hisi_asp_dma_get_chan_stat(struct hisi_asp_dma_dev *d)
+{
+ return readl_relaxed(d->base + CH_STAT);
+}
+
+static void hisi_asp_dma_enable_dma(struct hisi_asp_dma_dev *d, bool on)
+{
+ if (on) {
+ /* set same priority */
+ writel_relaxed(0x0, d->base + CH_PRI);
+
+ /* unmask irq */
+ writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_TC2_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+ } else {
+ /* mask irq */
+ writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_TC2_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+ }
+}
+
+static irqreturn_t hisi_asp_dma_int_handler(int irq, void *dev_id)
+{
+ struct hisi_asp_dma_dev *d = (struct hisi_asp_dma_dev *)dev_id;
+ struct hisi_asp_dma_phy *p;
+ struct hisi_asp_dma_chan *c;
+ u32 stat = readl_relaxed(d->base + INT_STAT);
+ u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 tc2 = readl_relaxed(d->base + INT_TC2);
+ u32 err1 = readl_relaxed(d->base + INT_ERR1);
+ u32 err2 = readl_relaxed(d->base + INT_ERR2);
+ u32 i, irq_chan = 0;
+
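+	/*
+	 * TC1 appears to signal completion of a whole descriptor list, while
+	 * TC2 fires per linked-list item and drives the cyclic (audio period)
+	 * callback; ERR1/ERR2 are only reported as a warning here.
+	 */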
+ while (stat) {
+ i = __ffs(stat);
+ stat &= ~BIT(i);
+ if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
+ unsigned long flags;
+
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c && (tc1 & BIT(i))) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ p->ds_run = NULL;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ if (c && (tc2 & BIT(i))) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (p->ds_run != NULL)
+ vchan_cyclic_callback(&p->ds_run->vd);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ irq_chan |= BIT(i);
+ }
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ dev_warn(d->slave.dev, "DMA ERR\n");
+ }
+
+ writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
+ writel_relaxed(err1, d->base + INT_ERR1_RAW);
+ writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+ if (irq_chan)
+ tasklet_schedule(&d->task);
+
+ if (irq_chan || err1 || err2)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static int hisi_asp_dma_start_txd(struct hisi_asp_dma_chan *c)
+{
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & hisi_asp_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct hisi_asp_dma_desc_sw *ds =
+ container_of(vd, struct hisi_asp_dma_desc_sw, vd);
+ /*
+ * fetch and remove request from vc->desc_issued
+ * so vc->desc_issued only contains desc pending
+ */
+ list_del(&ds->vd.node);
+
+ WARN_ON_ONCE(c->phy->ds_run);
+ WARN_ON_ONCE(c->phy->ds_done);
+ c->phy->ds_run = ds;
+ /* start dma */
+ hisi_asp_dma_set_desc(c->phy, &ds->desc_hw[0]);
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+/*
+ * XXX This function doesn't seem to actually do much, as the behavior
+ * is the same with or without it. The 1 >> c->phy->idx bit doesn't make
+ * total sense, but for now I'm leaving it as-is until I can better
+ * understand the intent. -jstultz
+ */
+static void
+hisi_asp_dma_set_cyclic(struct hisi_asp_dma_chan *c, struct hisi_asp_dma_dev *d, int cyclic)
+{
+ int mask = 1 << c->phy->idx;
+
+ writel_relaxed(1 >> c->phy->idx, d->base + INT_TC2_RAW);
+ if (cyclic)
+		writel_relaxed(readl(d->base + INT_TC2_MASK) | mask,
+ d->base + INT_TC2_MASK);
+ else
+ writel_relaxed(readl(d->base + INT_TC2_MASK) & ~mask,
+ d->base + INT_TC2_MASK);
+}
+
+static void hisi_asp_dma_tasklet(unsigned long arg)
+{
+ struct hisi_asp_dma_dev *d = (struct hisi_asp_dma_dev *)arg;
+ struct hisi_asp_dma_phy *p;
+ struct hisi_asp_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+
+ /* check new dma request of running channel in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
+ p = c->phy;
+ if (p && p->ds_done) {
+ if (hisi_asp_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ hisi_asp_dma_set_cyclic(c, d, 0);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->vc.lock);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct hisi_asp_dma_chan, node);
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << pch;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ hisi_asp_dma_set_cyclic(c, d, c->cyclic);
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irq(&c->vc.lock);
+ hisi_asp_dma_start_txd(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+ }
+ }
+}
+
+static void hisi_asp_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status hisi_asp_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ struct hisi_asp_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd && !c->cyclic) {
+ bytes = container_of(vd, struct hisi_asp_dma_desc_sw, vd)->size;
+ } else if ((!p) || (!p->ds_run)) {
+ bytes = 0;
+ } else {
+ struct hisi_asp_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
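+ /*
+ * Transfer is in flight: take the count still pending in the
+ * hardware for the current LLI, then add the sizes of the LLIs
+ * that have not been fetched yet.
+ */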
+ bytes = hisi_asp_dma_get_curr_cnt(d, p);
+ clli = hisi_asp_dma_get_curr_lli(p);
+ index = ((clli - ds->desc_hw_lli) / sizeof(struct hisi_asp_desc_hw)) + 1;
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].count;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
+
+static void hisi_asp_dma_issue_pending(struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy) {
+ if (list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ /* check in tasklet */
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ }
+ spin_unlock(&d->lock);
+ } else
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static void hisi_asp_dma_fill_desc(struct hisi_asp_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if (num != ds->desc_num - 1)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct hisi_asp_desc_hw);
+
+ ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+ ds->desc_hw[num].count = len;
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].config = ccfg;
+
+ pr_debug("%s: hisi_asp_dma_desc_sw = %p, desc_hw = %p (num = %d) lli: 0x%x, count: 0x%x, saddr: 0x%x, daddr 0x%x, cfg: 0x%x\n", __func__,
+ (void *)ds, &ds->desc_hw[num], num,
+ ds->desc_hw[num].lli, ds->desc_hw[num].count, ds->desc_hw[num].saddr, ds->desc_hw[num].daddr, ds->desc_hw[num].config);
+
+}
+
+static struct hisi_asp_dma_desc_sw *hisi_asp_dma_alloc_desc_resource(int num,
+ struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_desc_sw *ds;
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ int lli_limit = LLI_BLOCK_SIZE / sizeof(struct hisi_asp_desc_hw);
+
+ if (num > lli_limit) {
+ dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
+ &c->vc, num, lli_limit);
+ return NULL;
+ }
+
+ ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+ if (!ds)
+ return NULL;
+
+ ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ if (!ds->desc_hw) {
+ dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+ kfree(ds);
+ return NULL;
+ }
+ memset(ds->desc_hw, 0, sizeof(struct hisi_asp_desc_hw) * num);
+ ds->desc_num = num;
+ return ds;
+}
+
+static struct dma_async_tx_descriptor *hisi_asp_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+ ds = hisi_asp_dma_alloc_desc_resource(num, chan);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ c->cyclic = 0;
+ ds->size = len;
+ num = 0;
+
+ if (!c->ccfg) {
+ /* default is memtomem, without calling device_config */
+ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+ c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
+ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
+ }
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ hisi_asp_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ if (c->dir == DMA_MEM_TO_DEV) {
+ src += copy;
+ } else if (c->dir == DMA_DEV_TO_MEM) {
+ dst += copy;
+ } else {
+ src += copy;
+ dst += copy;
+ }
+ len -= copy;
+ } while (len);
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *hisi_asp_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+ if (sgl == NULL)
+ return NULL;
+
+ c->cyclic = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ pr_err(" avail=0x%x\n", (int)avail);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = hisi_asp_dma_alloc_desc_resource(num, chan);
+ if (!ds) {
+ dev_err(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ num = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ hisi_asp_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+hisi_asp_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = 1, since = 0;
+ size_t modulo = DMA_CYCLIC_MAX_PERIOD;
+ u32 en_tc2 = 0;
+
+ pr_debug("%s: buf %p, dst %p, buf len %d, period_len = %d, dir %d\n",
+ __func__, (void *)buf_addr, (void *)to_hisi_asp_chan(chan)->dev_addr,
+ (int)buf_len, (int)period_len, (int)dir);
+
+ avail = buf_len;
+ if (avail > modulo)
+ num += DIV_ROUND_UP(avail, modulo) - 1;
+
+ ds = hisi_asp_dma_alloc_desc_resource(num, chan);
+ if (!ds) {
+ dev_err(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+
+ c->cyclic = 1;
+ addr = buf_addr;
+ avail = buf_len;
+ total = avail;
+ num = 0;
+
+ if (period_len < modulo)
+ modulo = period_len;
+
+ do {
+ len = min_t(size_t, avail, modulo);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+ since += len;
+ if (since >= period_len) {
+ /* descriptor asks for TC2 interrupt on completion */
+ en_tc2 = CX_CFG_NODEIRQ;
+ since -= period_len;
+ } else
+ en_tc2 = 0;
+
+ hisi_asp_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+
+ /* "Cyclic" == end of link points back to start of link */
+ ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
+
+ ds->size = total;
+
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+
+static int hisi_asp_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ u32 maxburst = 0, val = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+ if (cfg == NULL)
+ return -EINVAL;
+ c->dir = cfg->direction;
+ if (c->dir == DMA_DEV_TO_MEM) {
+ c->ccfg = CX_CFG_DSTINCR;
+ c->dev_addr = cfg->src_addr;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ } else if (c->dir == DMA_MEM_TO_DEV) {
+ c->ccfg = CX_CFG_SRCINCR;
+ c->dev_addr = cfg->dst_addr;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ }
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ val = __ffs(width);
+ break;
+ default:
+ val = 3;
+ break;
+ }
+ c->ccfg |= (val << 12) | (val << 16);
+
+ if ((maxburst == 0) || (maxburst > 16))
+ val = 15;
+ else
+ val = maxburst - 1;
+ c->ccfg |= (val << 20) | (val << 24);
+ c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+ /* specific request line */
+ c->ccfg |= c->vc.chan.chan_id << 4;
+
+ return 0;
+}
+
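+/*
+ * Client usage sketch (illustrative only, not part of this driver): a sound
+ * driver sitting on top of this controller would typically configure and
+ * start a cyclic transfer roughly like below, where "fifo_phys" is just an
+ * assumed name for the device FIFO bus address:
+ *
+ * struct dma_slave_config cfg = {
+ * .direction = DMA_MEM_TO_DEV,
+ * .dst_addr = fifo_phys,
+ * .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ * .dst_maxburst = 16,
+ * };
+ * dmaengine_slave_config(chan, &cfg);
+ * desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
+ * DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *
+ * which ends up in hisi_asp_dma_config() and hisi_asp_dma_prep_dma_cyclic()
+ * above.
+ */
+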
+static void hisi_asp_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct hisi_asp_dma_desc_sw *ds =
+ container_of(vd, struct hisi_asp_dma_desc_sw, vd);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(vd->tx.chan->device);
+
+ dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+ kfree(ds);
+}
+
+
+static int hisi_asp_dma_terminate_all(struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ struct hisi_asp_dma_phy *p = c->phy;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ hisi_asp_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ if (p->ds_run) {
+ hisi_asp_dma_free_desc(&p->ds_run->vd);
+ p->ds_run = NULL;
+ }
+ if (p->ds_done) {
+ hisi_asp_dma_free_desc(&p->ds_done->vd);
+ p->ds_done = NULL;
+ }
+
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int hisi_asp_dma_transfer_pause(struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ struct hisi_asp_dma_phy *p = c->phy;
+
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+ if (p) {
+ hisi_asp_dma_pause_dma(p, false);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+
+ return 0;
+}
+
+static int hisi_asp_dma_transfer_resume(struct dma_chan *chan)
+{
+ struct hisi_asp_dma_chan *c = to_hisi_asp_chan(chan);
+ struct hisi_asp_dma_dev *d = to_hisi_asp_dma(chan->device);
+ struct hisi_asp_dma_phy *p = c->phy;
+ unsigned long flags;
+
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+ if (p) {
+ hisi_asp_dma_pause_dma(p, true);
+ } else if (!list_empty(&c->vc.desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_asp_pdma_dt_ids[] = {
+ { .compatible = "hisilicon,hisi-pcm-asp-dma", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hisi_asp_pdma_dt_ids);
+
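+/*
+ * Example device-tree node (illustrative; the unit address, register size,
+ * interrupt specifier and channel/request counts below are assumptions, not
+ * taken from a real board file):
+ *
+ * asp_dmac: dma-controller@e804b000 {
+ * compatible = "hisilicon,hisi-pcm-asp-dma";
+ * reg = <0x0 0xe804b000 0x0 0x1000>;
+ * interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+ * #dma-cells = <1>;
+ * dma-channels = <16>;
+ * dma-requests = <32>;
+ * };
+ *
+ * "dma-channels" and "dma-requests" are read in hisi_asp_dma_probe(), and the
+ * single dma-cell is the request line decoded by
+ * hisi_asp_of_dma_simple_xlate() below.
+ */
+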
+static struct dma_chan *hisi_asp_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct hisi_asp_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
+
+static int hisi_asp_dma_probe(struct platform_device *op)
+{
+ struct hisi_asp_dma_dev *d;
+ const struct of_device_id *of_id;
+ struct resource *iores;
+ int i, ret, irq = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_id = of_match_device(hisi_asp_pdma_dt_ids, &op->dev);
+ if (of_id) {
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-requests", &d->dma_requests);
+ }
+
+ irq = platform_get_irq(op, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&op->dev, irq,
+ hisi_asp_dma_int_handler, 0, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* A DMA memory pool for LLIs, align on 32-byte boundary */
+ d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+ LLI_BLOCK_SIZE, 32, 0);
+ if (!d->pool)
+ return -ENOMEM;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct hisi_asp_dma_phy), GFP_KERNEL);
+ if (d->phy == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct hisi_asp_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_free_chan_resources = hisi_asp_dma_free_chan_resources;
+ d->slave.device_tx_status = hisi_asp_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = hisi_asp_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = hisi_asp_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = hisi_asp_dma_prep_dma_cyclic;
+ d->slave.device_issue_pending = hisi_asp_dma_issue_pending;
+ d->slave.device_config = hisi_asp_dma_config;
+ d->slave.device_pause = hisi_asp_dma_transfer_pause;
+ d->slave.device_resume = hisi_asp_dma_transfer_resume;
+ d->slave.device_terminate_all = hisi_asp_dma_terminate_all;
+ d->slave.copy_align = DMA_ALIGN;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct hisi_asp_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct hisi_asp_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = hisi_asp_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ hisi_asp_dma_enable_dma(d, true);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret) {
+ dev_err(&op->dev, "failed to register dma device\n");
+ goto err_disable_clk;
+ }
+
+ ret = of_dma_controller_register((&op->dev)->of_node,
+ hisi_asp_of_dma_simple_xlate, d);
+ if (ret) {
+ dev_err(&op->dev, "failed to register dma controller\n");
+ goto of_dma_register_fail;
+ }
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ tasklet_init(&d->task, hisi_asp_dma_tasklet, (unsigned long)d);
+ platform_set_drvdata(op, d);
+ dev_info(&op->dev, "initialized\n");
+
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+err_disable_clk:
+ clk_disable_unprepare(d->clk);
+ return ret;
+}
+
+static int hisi_asp_dma_remove(struct platform_device *op)
+{
+ struct hisi_asp_dma_chan *c, *cn;
+ struct hisi_asp_dma_dev *d = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free((&op->dev)->of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+ tasklet_kill(&d->task);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_asp_dma_suspend_dev(struct device *dev)
+{
+ struct hisi_asp_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = hisi_asp_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "channel(s) still running (stat 0x%x), cannot suspend\n", stat);
+ return -EBUSY;
+ }
+ hisi_asp_dma_enable_dma(d, false);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int hisi_asp_dma_resume_dev(struct device *dev)
+{
+ struct hisi_asp_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ hisi_asp_dma_enable_dma(d, true);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hisi_asp_dma_pmops, hisi_asp_dma_suspend_dev, hisi_asp_dma_resume_dev);
+
+static struct platform_driver hisi_asp_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &hisi_asp_dma_pmops,
+ .of_match_table = hisi_asp_pdma_dt_ids,
+ },
+ .probe = hisi_asp_dma_probe,
+ .remove = hisi_asp_dma_remove,
+};
+
+module_platform_driver(hisi_asp_pdma_driver);
+
+MODULE_DESCRIPTION("HiSilicon ASP DMA Driver");
+MODULE_ALIAS("platform:hisiaspdma");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb7934b05..90ef37d1f486 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -223,7 +223,6 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
vchan_cookie_complete(&p->ds_run->vd);
- WARN_ON_ONCE(p->ds_done);
p->ds_done = p->ds_run;
p->ds_run = NULL;
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -274,13 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
*/
list_del(&ds->vd.node);
- WARN_ON_ONCE(c->phy->ds_run);
- WARN_ON_ONCE(c->phy->ds_done);
c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
/* start dma */
k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
return 0;
}
+ c->phy->ds_run = NULL;
+ c->phy->ds_done = NULL;
return -EAGAIN;
}
@@ -723,11 +723,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
k3_dma_free_desc(&p->ds_run->vd);
p->ds_run = NULL;
}
- if (p->ds_done) {
- k3_dma_free_desc(&p->ds_done->vd);
- p->ds_done = NULL;
- }
-
+ p->ds_done = NULL;
}
spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index e9ed439a5b65..475bfb5ee166 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -2,5 +2,6 @@
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
-obj-y += drm/ vga/
+obj-y += drm/ vga/ arm/
+obj-y += arm_gpu/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig
new file mode 100644
index 000000000000..255cc81c7d23
--- /dev/null
+++ b/drivers/gpu/arm/Kconfig
@@ -0,0 +1 @@
+source "drivers/gpu/arm/utgard/Kconfig"
diff --git a/drivers/gpu/arm/Makefile b/drivers/gpu/arm/Makefile
new file mode 100644
index 000000000000..e4dcf28c56f4
--- /dev/null
+++ b/drivers/gpu/arm/Makefile
@@ -0,0 +1 @@
+obj-y += utgard/
diff --git a/drivers/gpu/arm/utgard/.gitignore b/drivers/gpu/arm/utgard/.gitignore
new file mode 100644
index 000000000000..60f31adb1d99
--- /dev/null
+++ b/drivers/gpu/arm/utgard/.gitignore
@@ -0,0 +1,2 @@
+#Ignore generated files
+__malidrv_build_info.c
diff --git a/drivers/gpu/arm/utgard/Kbuild b/drivers/gpu/arm/utgard/Kbuild
new file mode 100644
index 000000000000..f4e4d44df1e4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Kbuild
@@ -0,0 +1,243 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 0
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+MALI_ENABLE_CPU_CYCLES ?= 0
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The in-tree driver will only use the GPL releases.
+ccflags-y += -I$(src)/linux/license/gpl
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
+ $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; see the Integration Guide to choose which one you need)
+ endif
+endif
+
+ifeq ($(MALI_PLATFORM_FILES),)
+ifeq ($(CONFIG_ARCH_EXYNOS4),y)
+EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM=exynos4
+export MALI_PLATFORM_FILES_BUILDIN = $(notdir $(wildcard $(src)/platform/$(MALI_PLATFORM)/*.c))
+export MALI_PLATFORM_FILES_ADD_PREFIX = $(addprefix platform/$(MALI_PLATFORM)/,$(MALI_PLATFORM_FILES_BUILDIN))
+endif
+endif
+
+mali-y += \
+ linux/mali_osk_atomics.o \
+ linux/mali_osk_irq.o \
+ linux/mali_osk_wq.o \
+ linux/mali_osk_locks.o \
+ linux/mali_osk_wait_queue.o \
+ linux/mali_osk_low_level_mem.o \
+ linux/mali_osk_math.o \
+ linux/mali_osk_memory.o \
+ linux/mali_osk_misc.o \
+ linux/mali_osk_mali.o \
+ linux/mali_osk_notification.o \
+ linux/mali_osk_time.o \
+ linux/mali_osk_timers.o \
+ linux/mali_osk_bitmap.o
+
+mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
+mali-y += linux/mali_memory_external.o
+mali-y += linux/mali_memory_block_alloc.o
+mali-y += linux/mali_memory_swap_alloc.o
+
+mali-y += \
+ linux/mali_memory_manager.o \
+ linux/mali_memory_virtual.o \
+ linux/mali_memory_util.o \
+ linux/mali_memory_cow.o \
+ linux/mali_memory_defer_bind.o
+
+mali-y += \
+ linux/mali_ukk_mem.o \
+ linux/mali_ukk_gp.o \
+ linux/mali_ukk_pp.o \
+ linux/mali_ukk_core.o \
+ linux/mali_ukk_soft_job.o \
+ linux/mali_ukk_timeline.o
+
+mali-$(CONFIG_MALI_DEVFREQ) += \
+ linux/mali_devfreq.o \
+ common/mali_pm_metrics.o
+
+# Source files which always are included in a build
+mali-y += \
+ common/mali_kernel_core.o \
+ linux/mali_kernel_linux.o \
+ common/mali_session.o \
+ linux/mali_device_pause_resume.o \
+ common/mali_kernel_vsync.o \
+ linux/mali_ukk_vsync.o \
+ linux/mali_kernel_sysfs.o \
+ common/mali_mmu.o \
+ common/mali_mmu_page_directory.o \
+ common/mali_mem_validation.o \
+ common/mali_hw_core.o \
+ common/mali_gp.o \
+ common/mali_pp.o \
+ common/mali_pp_job.o \
+ common/mali_gp_job.o \
+ common/mali_soft_job.o \
+ common/mali_scheduler.o \
+ common/mali_executor.o \
+ common/mali_group.o \
+ common/mali_dlbu.o \
+ common/mali_broadcast.o \
+ common/mali_pm.o \
+ common/mali_pmu.o \
+ common/mali_user_settings_db.o \
+ common/mali_kernel_utilization.o \
+ common/mali_control_timer.o \
+ common/mali_l2_cache.o \
+ common/mali_timeline.o \
+ common/mali_timeline_fence_wait.o \
+ common/mali_timeline_sync_fence.o \
+ common/mali_spinlock_reentrant.o \
+ common/mali_pm_domain.o \
+ linux/mali_osk_pm.o \
+ linux/mali_pmu_power_up_down.o \
+ __malidrv_build_info.o
+
+# Files generated that shall be removed upon make clean
+clean-files := __malidrv_build_info.c
+
+ifneq ($(wildcard $(src)/linux/mali_slp_global_lock.c),)
+ mali-y += linux/mali_slp_global_lock.o
+endif
+
+EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+mali-y += platform/hikey/mali_hikey.o
+
+ifneq ($(MALI_PLATFORM_FILES_ADD_PREFIX),)
+ mali-y += $(MALI_PLATFORM_FILES_ADD_PREFIX:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_secure.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o
+mali-$(CONFIG_SYNC) += linux/mali_internal_sync.o
+mali-$(CONFIG_SYNC_FILE) += linux/mali_sync.o
+mali-$(CONFIG_SYNC_FILE) += linux/mali_internal_sync.o
+mali-$(CONFIG_MALI_DMA_BUF_FENCE) += linux/mali_dma_fence.o
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+ccflags-$(CONFIG_SYNC_FILE) += -Idrivers/staging/android
+
+mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
+
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+ ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+# Built-in include path is different
+ifeq ($(MALI_PLATFORM_FILES),)
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../ump/include/
+else
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump
+endif
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform -Wno-date-time
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
+VERSION_STRINGS += USING_DMA_BUF_FENCE=$(CONFIG_MALI_DMA_BUF_FENCE)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(src)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c
diff --git a/drivers/gpu/arm/utgard/Kconfig b/drivers/gpu/arm/utgard/Kconfig
new file mode 100644
index 000000000000..0a31ff436456
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Kconfig
@@ -0,0 +1,129 @@
+config MALI400
+ tristate "Mali-300/400/450 support"
+ depends on ARM || ARM64
+ select DMA_SHARED_BUFFER
+ ---help---
+ This enables support for the ARM Mali-300, Mali-400, and Mali-450
+ GPUs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mali.
+
+config MALI450
+ bool "Enable Mali-450 support"
+ depends on MALI400
+ ---help---
+ This enables support for Mali-450 specific features.
+
+config MALI470
+ bool "Enable Mali-470 support"
+ depends on MALI400
+ ---help---
+ This enables support for Mali-470 specific features.
+
+config MALI400_DEBUG
+ bool "Enable debug in Mali driver"
+ depends on MALI400
+ ---help---
+ This enables extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+ bool "Enable Mali profiling"
+ depends on MALI400
+ select TRACEPOINTS
+ default y
+ ---help---
+ This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+ bool "Enable internal Mali profiling API"
+ depends on MALI400_PROFILING
+ default n
+ ---help---
+ This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+ bool "Enable UMP support"
+ depends on MALI400
+ ---help---
+ This enables support for the UMP memory sharing API in the Mali driver.
+
+config MALI_DVFS
+ bool "Enable Mali dynamically frequency change"
+ depends on MALI400 && !MALI_DEVFREQ
+ default y
+ ---help---
+ This enables support for dynamic frequency scaling of the Mali GPU with the goal of lowering power consumption.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+ bool "Map dma-buf attachments on attach"
+ depends on MALI400 && DMA_SHARED_BUFFER
+ default y
+ ---help---
+ This makes the Mali driver map dma-buf attachments when they are
+ attached. If this is not set, dma-buf attachments will be mapped
+ every time the GPU needs to access the buffer.
+
+ Mapping on each access can lower performance.
+
+config MALI_SHARED_INTERRUPTS
+ bool "Support for shared interrupts"
+ depends on MALI400
+ default n
+ ---help---
+ Adds functionality required to properly support shared interrupts. Without this support,
+ the device driver will fail during insmod if it detects shared interrupts. This also
+ works when the GPU is not using shared interrupts, but might have a slight performance
+ impact.
+
+config MALI_PMU_PARALLEL_POWER_UP
+ bool "Power up Mali PMU domains in parallel"
+ depends on MALI400
+ default n
+ ---help---
+ This makes the Mali driver power up all PMU power domains in parallel, instead of
+ powering up domains one by one, with a slight delay in between. Powering on all power
+ domains at the same time may cause peak currents higher than what some systems can handle.
+ These systems must not enable this option.
+
+config MALI_DT
+ bool "Using device tree to initialize module"
+ depends on MALI400 && OF
+ default n
+ ---help---
+ This enables the Mali driver to get platform resources from the
+ device tree and disables the old config method. The Mali driver can
+ then run on platforms where the device tree is enabled in the kernel
+ and the corresponding hardware description is implemented properly in
+ the device DTS file.
+
+config MALI_PLAT_SPECIFIC_DT
+ bool "Platform specific Device Tree is being used"
+ depends on MALI_DT
+ default n
+ ---help---
+ This is a pragmatic approach for some platforms which make
+ use of a device tree entry that does not strictly comply with
+ what the standard Utgard driver expects to find, but have
+ their platform data implemented the old way. Such platforms
+ should be converted to using the Device Tree so this
+ configuration option can be removed.
+
+config MALI_DEVFREQ
+ bool "Using devfreq to tuning frequency"
+ depends on MALI400 && PM_DEVFREQ
+ default n
+ ---help---
+ Support devfreq for Mali.
+
+ Using the devfreq framework and, by default, the simpleondemand
+ governor, the frequency of Mali will be dynamically selected from the
+ available OPPs.
+
+config MALI_QUIET
+ bool "Make Mali driver very quiet"
+ depends on MALI400 && !MALI400_DEBUG
+ default n
+ ---help---
+ This forces the Mali driver to never print any messages.
+
+ If unsure, say N.
diff --git a/drivers/gpu/arm/utgard/Makefile b/drivers/gpu/arm/utgard/Makefile
new file mode 100644
index 000000000000..0b91321a5af1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Makefile
@@ -0,0 +1,206 @@
+#
+# Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_DVFS ?= 1
+USING_DMA_BUF_FENCE ?= 0
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
+MALI_MEM_SWAP_TRACKING ?= 0
+USING_DEVFREQ ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then calls the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
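+# Example out-of-tree invocation (illustrative only; the TARGET_PLATFORM value
+# depends on the local MALI_CONFIGURATION mapping and is an assumption here):
+#   make TARGET_PLATFORM=<your-platform> BUILD=release USING_PROFILING=1 USING_DT=1
+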
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Warn if old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
+ $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; see the Integration Guide to choose which one you need)
+ endif
+endif
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+export CONFIG_MALI450=y
+export CONFIG_MALI470=y
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+export EXTRA_DEFINES += -DCONFIG_MALI450=1
+export EXTRA_DEFINES += -DCONFIG_MALI470=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
+endif
+
+ifeq ($(USING_DMA_BUF_FENCE),1)
+export CONFIG_MALI_DMA_BUF_FENCE=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_FENCE
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
+ifeq ($(USING_DEVFREQ), 1)
+ifdef CONFIG_PM_DEVFREQ
+export CONFIG_MALI_DEVFREQ=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DEVFREQ=1
+else
+$(warning "You want to support DEVFREQ but kernel didn't support DEVFREQ.")
+endif
+endif
+
+ifneq ($(BUILD),release)
+# Debug
+export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
+endif
+
+ifeq ($(MALI_MEM_SWAP_TRACKING),1)
+EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.c b/drivers/gpu/arm/utgard/common/mali_broadcast.c
new file mode 100644
index 000000000000..79a418c36ccb
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_broadcast.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#define MALI_BROADCAST_REGISTER_SIZE 0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4
+
+struct mali_bcast_unit {
+ struct mali_hw_core hw_core;
+ u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_bcast_unit *bcast_unit = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+ MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+ resource->description));
+
+ bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+ if (NULL == bcast_unit) {
+ MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
+ return NULL;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+ resource, MALI_BROADCAST_REGISTER_SIZE)) {
+ bcast_unit->current_mask = 0;
+ mali_bcast_reset(bcast_unit);
+
+ return bcast_unit;
+ } else {
+ MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n"));
+ }
+
+ _mali_osk_free(bcast_unit);
+
+ return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ mali_hw_core_delete(&bcast_unit->hw_core);
+ _mali_osk_free(bcast_unit);
+}
+
+/* Call this function to add the @group's id into the bcast mask.
+ * Note: calling this function repeatedly with the same @group has the
+ * same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+ broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+/* Call this function to remove @group's id from the bcast mask.
+ * Note: calling this function repeatedly with the same @group has the
+ * same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ MALI_DEBUG_PRINT(4,
+ ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+ bcast_unit->current_mask,
+ bcast_unit->current_mask & 0xFF));
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
+ bcast_unit->current_mask);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
+ bcast_unit->current_mask & 0xFF);
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
+ 0x0);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
+ 0x0);
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.h b/drivers/gpu/arm/utgard/common/mali_broadcast.h
new file mode 100644
index 000000000000..0475b7171d8d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_broadcast.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
+/*
+ * Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 × (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ * setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+ mali_bcast_reset(bcast_unit);
+}
+
+#endif /* __MALI_BROADCAST_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.c b/drivers/gpu/arm/utgard/common/mali_control_timer.c
new file mode 100644
index 000000000000..a499de8532cc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_control_timer.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+static u64 period_start_time = 0;
+
+static _mali_osk_timer_t *mali_control_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 mali_control_timeout = 1000;
+
+void mali_control_timer_add(u32 timeout)
+{
+ _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+static void mali_control_timer_callback(void *arg)
+{
+ if (mali_utilization_enabled()) {
+ struct mali_gpu_utilization_data *util_data = NULL;
+ u64 time_period = 0;
+ mali_bool need_add_timer = MALI_TRUE;
+
+ /* Calculate gpu utilization */
+ util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer);
+
+ if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+ mali_dvfs_policy_realize(util_data, time_period);
+#else
+ mali_utilization_platform_realize(util_data);
+#endif
+
+ if (MALI_TRUE == need_add_timer) {
+ mali_control_timer_add(mali_control_timeout);
+ }
+ }
+ }
+}
+
+/* Init a timer (for now it is used for GPU utilization and dvfs) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.control_interval) {
+ mali_control_timeout = data.control_interval;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+ }
+ }
+
+ mali_control_timer = _mali_osk_timer_init();
+ if (NULL == mali_control_timer) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_control_timer_term(void)
+{
+ if (NULL != mali_control_timer) {
+ _mali_osk_timer_del(mali_control_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(mali_control_timer);
+ mali_control_timer = NULL;
+ }
+}
+
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+ mali_utilization_data_assert_locked();
+
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+
+ period_start_time = time_now;
+
+ mali_utilization_reset();
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void mali_control_timer_pause(void)
+{
+ mali_utilization_data_assert_locked();
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_control_timer_suspend(mali_bool suspend)
+{
+ mali_utilization_data_lock();
+
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+
+ mali_utilization_data_unlock();
+
+ if (suspend == MALI_TRUE) {
+ _mali_osk_timer_del(mali_control_timer);
+ mali_utilization_reset();
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.h b/drivers/gpu/arm/utgard/common/mali_control_timer.h
new file mode 100644
index 000000000000..fc6a21cd7bd5
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_control_timer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+void mali_control_timer_term(void);
+
+mali_bool mali_control_timer_resume(u64 time_now);
+
+void mali_control_timer_suspend(mali_bool suspend);
+void mali_control_timer_pause(void);
+
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.c b/drivers/gpu/arm/utgard/common/mali_dlbu.c
new file mode 100644
index 000000000000..99b7f360768b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dlbu.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+mali_dma_addr mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - setting this bit to 1 enables the AXI bus
+ between PPs and L2s; setting it to 0 disables the router so
+ no further transactions are sent to the DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
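+
+/*
+ * Worked example for MALI_DLBU_REGISTER_FB_DIM (illustrative, assuming the
+ * usual 16x16 pixel tiles): an 800x480 framebuffer has 50 tiles in X and 30
+ * tiles in Y, so FB_DIM would hold ((30 - 1) << 16) | (50 - 1) = 0x001d0031.
+ */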
+
+typedef enum {
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU
+ see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+ if (_MALI_OSK_ERR_OK ==
+ mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+ &mali_dlbu_cpu_addr)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+ if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+ mali_mmu_release_table_page(mali_dlbu_phys_addr,
+ mali_dlbu_cpu_addr);
+ mali_dlbu_phys_addr = 0;
+ mali_dlbu_cpu_addr = 0;
+ }
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_dlbu_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+ core->pp_cores_mask = 0;
+ if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+ return core;
+ }
+ MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ mali_hw_core_delete(&dlbu->hw_core);
+ _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+ u32 dlbu_registers[7];
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+ dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+ dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+ dlbu_registers[2] = 0;
+ dlbu_registers[3] = 0;
+ dlbu_registers[4] = 0;
+ dlbu_registers[5] = 0;
+ dlbu_registers[6] = dlbu->pp_cores_mask;
+
+ /* write reset values to core registers */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+ err = _MALI_OSK_ERR_OK;
+
+ return err;
+}
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask |= bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask &= ~bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+ u32 *registers;
+ MALI_DEBUG_ASSERT(job);
+ registers = mali_pp_job_get_dlbu_registers(job);
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+ /* Write 4 registers:
+ * all DLBU registers except the first two (written once at DLBU initialisation / reset) and except the PP_ENABLE_MASK register */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.h b/drivers/gpu/arm/utgard/common/mali_dlbu.h
new file mode 100644
index 000000000000..a7ecf41471d8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dlbu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+struct mali_dlbu_core;
+
+extern mali_dma_addr mali_dlbu_phys_addr;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c
new file mode 100644
index 000000000000..55b21a410754
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
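+/*
+ * For example, MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70)
+ * = (int)(70 * 256 / 100.0 + 0.5) = (int)179.7 = 179,
+ * i.e. 70% expressed on the 0..MAX_PERFORMANCE_VALUE utilization scale.
+ */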
+
+/** The max FPS, the same as the display vsync (default 60); can be set via a module parameter at insert time */
+int mali_max_system_fps = 60;
+/** A lower limit on the desired FPS (default 58); can be set via a module parameter at insert time */
+int mali_desired_fps = 58;
+
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
+static int clock_step = -1;
+static int cur_clk_step = -1;
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Platform callback function pointers */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
+static u32 calculate_window_render_fps(u64 time_period)
+{
+ u32 max_window_number;
+ u64 tmp;
+ u64 max = time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 time_period_shift;
+ u32 max_window_number_shift;
+ u32 ret_val;
+
+ max_window_number = mali_session_max_window_num();
+
+ /* To avoid float division, extend the dividend to ns unit */
+ tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+ if (tmp > time_period) {
+ max = tmp;
+ }
+
+ /*
+ * We may have 64-bit values for the dividend, the divisor, or both.
+ * To avoid depending on a 64-bit divider, we first shift both values
+ * down equally.
+ */
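+ /*
+ * Example with illustrative numbers: if the larger value has its highest
+ * set bit at bit 40, its high word has 23 leading zeroes, so
+ * shift_val = 32 - 23 = 9 and both values are shifted right by 9 bits,
+ * after which a 32-bit division suffices. When max already fits in 32 bits
+ * (assuming _mali_osk_clz() of a zero high word returns 32), shift_val is 0
+ * and no precision is lost.
+ */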
+ leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+ shift_val = 32 - leading_zeroes;
+
+ time_period_shift = (u32)(time_period >> shift_val);
+ max_window_number_shift = (u32)(tmp >> shift_val);
+
+ ret_val = max_window_number_shift / time_period_shift;
+
+ return ret_val;
+}
+
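+/*
+ * Pick the clock step closest to target_clock_mhz from the platform clock
+ * table: the first step above the target when pick_clock_up is set,
+ * otherwise the step just below it, clamped to the table boundaries.
+ * The chosen step is stored in clock_step; the return value tells the
+ * caller whether it differs from cur_clk_step.
+ */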
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+ int i = 0;
+ bool clock_changed = false;
+
+ /* Round up to the closest available frequency step for target_clock_mhz */
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+ /* Find the first item > target_clock_mhz */
+ if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+ break;
+ }
+ }
+
+ /* If the target clock is greater than the maximum clock, just pick the maximum one */
+ if (i == gpu_clk->num_of_steps) {
+ i = gpu_clk->num_of_steps - 1;
+ } else {
+ if ((!pick_clock_up) && (i > 0)) {
+ i = i - 1;
+ }
+ }
+
+ clock_step = i;
+ if (cur_clk_step != clock_step) {
+ clock_changed = true;
+ }
+
+ return clock_changed;
+}
+
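+/*
+ * ARM DVFS policy: derive the window render FPS for the elapsed period,
+ * choose utilization boundaries according to the FPS band we are in, then
+ * scale the current clock towards a target frequency and pick the closest
+ * available step. Off-screen rendering (current_fps == 0) is handled
+ * separately: full speed when the GPU is busy, lowest step when it is idle.
+ */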
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+ int under_perform_boundary_value = 0;
+ int over_perform_boundary_value = 0;
+ int current_fps = 0;
+ int current_gpu_util = 0;
+ bool clock_changed = false;
+#if CLOCK_TUNING_TIME_DEBUG
+ struct timeval start;
+ struct timeval stop;
+ unsigned int elapse_time;
+ do_gettimeofday(&start);
+#endif
+ u32 window_render_fps;
+
+ if (NULL == gpu_clk) {
+ MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. \n"));
+ return;
+ }
+
+ window_render_fps = calculate_window_render_fps(time_period);
+
+ current_fps = window_render_fps;
+ current_gpu_util = data->utilization_gpu;
+
+ /* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+ if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+ } else {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ }
+
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps));
+
+ /* Get current clock value */
+ cur_clk_step = mali_gpu_get_freq();
+
+ /* Consider offscreen */
+ if (0 == current_fps) {
+ /* GP or PP under perform, need to give full power */
+ if (current_gpu_util > over_perform_boundary_value) {
+ if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+ clock_changed = true;
+ clock_step = gpu_clk->num_of_steps - 1;
+ }
+ }
+
+ /* If GPU is idle, use lowest power */
+ if (0 == current_gpu_util) {
+ if (cur_clk_step != 0) {
+ clock_changed = true;
+ clock_step = 0;
+ }
+ }
+
+ goto real_setting;
+ }
+
+ /* 2. Calculate target clock if the GPU clock can be tuned */
+ if (-1 != cur_clk_step) {
+ int target_clk_mhz = -1;
+ mali_bool pick_clock_up = MALI_TRUE;
+
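+ /*
+ * The target frequency scales with how far the utilization is from the
+ * chosen boundary; when under-performing it is additionally scaled by
+ * mali_desired_fps / current_fps to catch up on frame rate.
+ */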
+ if (current_gpu_util > under_perform_boundary_value) {
+ /* When under-performing, we also need to factor in the FPS */
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+ pick_clock_up = MALI_TRUE;
+ } else if (current_gpu_util < over_perform_boundary_value) {
+ /* When over-performing there is no need to consider FPS; the system doesn't need to push towards the desired FPS */
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+ pick_clock_up = MALI_FALSE;
+ }
+
+ if (-1 != target_clk_mhz) {
+ clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+ }
+ }
+
+real_setting:
+ if (clock_changed) {
+ mali_gpu_set_freq(clock_step);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ gpu_clk->item[clock_step].clock,
+ gpu_clk->item[clock_step].vol / 1000,
+ 0, 0, 0);
+ }
+
+#if CLOCK_TUNING_TIME_DEBUG
+ do_gettimeofday(&stop);
+
+ elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+ MALI_DEBUG_PRINT(2, ("Using ARM power policy: eclapse time = %d\n", elapse_time));
+#endif
+}
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+ _mali_osk_device_data data;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n"));
+
+
+ mali_fps_step1 = mali_max_system_fps / 3;
+ mali_fps_step2 = mali_max_system_fps / 5;
+
+ data.get_clock_info(&gpu_clk);
+
+ if (gpu_clk != NULL) {
+#ifdef DEBUG
+ int i;
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+ MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+ i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+ }
+#endif
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n"));
+ }
+
+ mali_gpu_get_freq = data.get_freq;
+ mali_gpu_set_freq = data.set_freq;
+
+ if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+ && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+ mali_dvfs_enabled = MALI_TRUE;
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+ }
+ } else {
+ err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n"));
+ }
+
+ return err;
+}
+
+/*
+ * Always give full power when starting a new period, if Mali DVFS is
+ * enabled, for performance reasons.
+ */
+void mali_dvfs_policy_new_period(void)
+{
+ /* Always give full power when start a new period */
+ unsigned int cur_clk_step = 0;
+
+ cur_clk_step = mali_gpu_get_freq();
+
+ if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+ mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+ gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+ }
+}
+
+mali_bool mali_dvfs_policy_enabled(void)
+{
+ return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+ if (mali_platform_device != NULL) {
+
+ struct mali_gpu_device_data *device_data = NULL;
+ device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+ if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+ int cur_clk_step = device_data->get_freq();
+ struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+ device_data->get_clock_info(&mali_gpu_clk);
+ clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+ clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+ }
+ }
+}
+#endif
+
diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h
new file mode 100644
index 000000000000..662348c4e6ac
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/* __MALI_DVFS_POLICY_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_executor.c b/drivers/gpu/arm/utgard/common/mali_executor.c
new file mode 100644
index 000000000000..ea263fdf4c6a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_executor.c
@@ -0,0 +1,2693 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+#include "mali_osk_mali.h"
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job
+ * queuing when in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+ EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+ EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */
+ EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */
+ EXEC_STATE_INACTIVE, /* Can be used, but must be activated first */
+ EXEC_STATE_IDLE, /* Active and ready to be used */
+ EXEC_STATE_WORKING, /* Executing a job */
+};
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but must be activated first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* PP cores that haven't been enabled yet because some PP cores haven't been disabled. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to notify userspace of PP core changes when core scaling
+ * finishes in the mali_executor_complete_group() function. */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+ mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+ if (NULL == mali_executor_lock_obj) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+ if (NULL == executor_wq_high_pri) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_working_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+ if (NULL == executor_wq_notify_core_change) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_notify_core_change_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_terminate(void)
+{
+ if (NULL != executor_notify_core_change_wait_queue) {
+ _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+ executor_notify_core_change_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_notify_core_change) {
+ _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+ executor_wq_notify_core_change = NULL;
+ }
+
+ if (NULL != executor_working_wait_queue) {
+ _mali_osk_wait_queue_term(executor_working_wait_queue);
+ executor_working_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_high_pri) {
+ _mali_osk_wq_delete_work(executor_wq_high_pri);
+ executor_wq_high_pri = NULL;
+ }
+
+ if (NULL != mali_executor_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+ mali_executor_lock_obj = NULL;
+ }
+}
+
+void mali_executor_populate(void)
+{
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group)) {
+ virtual_group = group;
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ break;
+ }
+ }
+
+ /* Find all the available physical GP and PP cores */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+ if (!mali_group_is_virtual(group)) {
+ if (NULL != pp_core) {
+ if (0 == pp_version) {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (NULL != virtual_group) {
+ mali_executor_lock();
+ mali_group_add_group(virtual_group, group);
+ mali_executor_unlock();
+ } else {
+ _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+ group_list_inactive_count++;
+ }
+
+ num_physical_pp_cores_total++;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+
+ gp_group = group;
+ gp_group_state = EXEC_STATE_INACTIVE;
+ }
+
+ }
+ }
+ }
+
+ num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
+
+void mali_executor_depopulate(void)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+ if (NULL != gp_group) {
+ mali_group_delete(gp_group);
+ gp_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ if (NULL != virtual_group) {
+ mali_group_delete(virtual_group);
+ virtual_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+}
+
+void mali_executor_suspend(void)
+{
+ mali_executor_lock();
+
+ /* Increment the pause_count so that no more jobs will be scheduled */
+ pause_count++;
+
+ mali_executor_unlock();
+
+ _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+ mali_executor_is_suspended, NULL);
+
+ /*
+ * mali_executor_complete_XX() leaves jobs in the idle state.
+ * The deactivate option is used when we are going to power down
+ * the entire GPU (OS suspend) and want a consistent SW vs HW
+ * state.
+ */
+ mali_executor_lock();
+
+ mali_executor_deactivate_list_idle(MALI_TRUE);
+
+ /*
+ * The following steps deactivate all activated
+ * (MALI_GROUP_STATE_ACTIVE) and activating
+ * (MALI_GROUP_STATE_ACTIVATION_PENDING) groups, to make sure the
+ * variable pd_mask_wanted is equal to 0. */
+ if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(gp_group);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (MALI_GROUP_STATE_INACTIVE
+ != mali_group_get_state(virtual_group)) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(virtual_group);
+ }
+ }
+
+ if (0 < group_list_inactive_count) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_inactive,
+ struct mali_group, executor_list) {
+ if (MALI_GROUP_STATE_ACTIVATION_PENDING
+ == mali_group_get_state(group)) {
+ mali_group_deactivate(group);
+ }
+
+ /*
+ * On the Mali-450 platform we may have a physical group in the
+ * inactive list whose state is MALI_GROUP_STATE_ACTIVATION_PENDING.
+ * Deactivating it is not enough; we also need to add it back to the
+ * virtual group. The virtual group must be in the INACTIVE state at
+ * this point, so it is safe to add the physical group to it now.
+ */
+ if (NULL != virtual_group) {
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_inactive_count--;
+
+ mali_group_add_group(virtual_group, group);
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+ mali_executor_lock();
+
+ /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ pause_count--;
+ if (0 == pause_count) {
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+u32 mali_executor_get_num_cores_total(void)
+{
+ return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+ return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+ return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+ return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+ mali_bool ret;
+
+ mali_executor_lock();
+
+ /*
+ * This function is a bit complicated because
+ * mali_group_zap_session() can fail. This only happens because the
+ * group is in an unhandled page fault status.
+ * We need to make sure this page fault is handled before we return,
+ * so that we know every outstanding MMU transaction has
+ * completed. This allows the caller to safely remove physical pages
+ * once we have returned.
+ */
+
+ MALI_DEBUG_ASSERT(NULL != gp_group);
+ ret = mali_group_zap_session(gp_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ ret = mali_group_zap_session(virtual_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+ struct mali_group, executor_list) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, free it */
+ mali_scheduler_complete_pp_job(pp_job,
+ 0, MALI_FALSE,
+ MALI_TRUE);
+ }
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ if (MALI_TRUE == deferred_schedule) {
+ _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+ } else {
+ /* Schedule from this thread */
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+ }
+ }
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+ mali_gp_job_get_id(group->gp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_gp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ mali_group_mask_all_interrupts_gp(group);
+
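+ /*
+ * A GP job runs on two sub-units, the VS and the PLBU. If only one of
+ * them has completed, the job is still active: re-enable the remaining
+ * interrupts and wait for the other unit. An OOM result means the PLBU
+ * ran out of heap memory and user space must provide more before the
+ * job can be resumed.
+ */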
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only VS completed so far, while PLBU is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only PLBU completed so far, while VS is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+ struct mali_gp_job *job = mali_group_get_running_gp_job(group);
+
+ /* PLBU out of mem */
+ MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Give group a chance to generate a SUSPEND event */
+ mali_group_oom(group);
+#endif
+
+ /*
+ * No need to keep the interrupt raised while
+ * waiting for more memory.
+ */
+ mali_executor_send_gp_oom_to_user(job);
+
+ mali_executor_unlock();
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_gp(group);
+ }
+ } else {
+ struct mali_gp_job *job;
+ mali_bool success;
+
+ if (MALI_TRUE == time_out) {
+ mali_group_dump_status(group);
+ }
+
+ success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success, &job, NULL);
+
+ mali_executor_unlock();
+
+ /* GP jobs always fully complete */
+ MALI_DEBUG_ASSERT(NULL != job);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, success,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (in_upper_half) {
+ if (mali_group_is_in_virtual(group)) {
+ /* Child groups should never handle PP interrupts */
+ MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+ MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+ mali_pp_job_get_id(group->pp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_pp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+ if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+ /* Some child groups are still working, so nothing to do right now */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_group_mask_all_interrupts_pp(group);
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_pp(group);
+ }
+ } else {
+ struct mali_pp_job *job = NULL;
+ mali_bool success;
+
+ if (MALI_TRUE == time_out) {
+ mali_group_dump_status(group);
+ }
+
+ success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success, NULL, &job);
+
+ mali_executor_unlock();
+
+ if (NULL != job) {
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+
+ MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ int_result = mali_group_get_interrupt_result_mmu(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ if (in_upper_half) {
+ /* Don't bother to do processing of errors in upper half */
+
+ struct mali_group *parent = group->parent_group;
+
+ mali_mmu_mask_all_interrupts(group->mmu);
+
+ mali_executor_unlock();
+
+ if (NULL == parent) {
+ mali_group_schedule_bottom_half_mmu(group);
+ } else {
+ mali_group_schedule_bottom_half_mmu(parent);
+ }
+
+ } else {
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+ u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+ u32 status = mali_mmu_get_status(group->mmu);
+ MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void *)(uintptr_t)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ group->mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+ mali_mmu_get_rawstat(group->mmu), status));
+ mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address);
+#endif
+
+ mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job);
+
+ mali_executor_unlock();
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT(NULL == pp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ } else if (NULL != pp_job) {
+ MALI_DEBUG_ASSERT(NULL == gp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(pp_job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+ u32 i;
+ mali_bool child_groups_activated = MALI_FALSE;
+ mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+ u32 num_activated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_up(groups[i]);
+
+ if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+ (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
+ /* nothing more to do for this group */
+ continue;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+ mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+ num_activated++;
+#endif
+
+ if (mali_group_is_in_virtual(groups[i])) {
+ /*
+ * At least one child group of virtual group is powered on.
+ */
+ child_groups_activated = MALI_TRUE;
+ } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ /* Set gp and pp not in virtual to active. */
+ mali_group_set_active(groups[i]);
+ }
+
+ /* Move group from inactive to idle list */
+ if (groups[i] == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ gp_group_state);
+ gp_group_state = EXEC_STATE_IDLE;
+ } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+ && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ mali_executor_change_state_pp_physical(groups[i],
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ do_schedule = MALI_TRUE;
+ }
+
+ if (mali_executor_has_virtual_group() &&
+ MALI_TRUE == child_groups_activated &&
+ MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Try to activate the virtual group. This may not succeed every
+ * time, because not all of the child groups may be powered on at
+ * once while the virtual group is in the activation pending state.
+ */
+ if (mali_group_set_active(virtual_group)) {
+ /* Move group from inactive to idle */
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ virtual_group_state);
+ virtual_group_state = EXEC_STATE_IDLE;
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+
+ if (MALI_TRUE == do_schedule) {
+ /* Trigger a schedule */
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+ u32 num_groups)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ /* Groups must be either disabled or inactive, while the virtual group
+ * may also be in the empty state: on pm_runtime_suspend the virtual
+ * group can be powered off, and because we must release
+ * mali_pm_state_lock before acquiring mali_executor_lock, a newly
+ * queued physical job could pull all physical groups out of the
+ * virtual group. We can therefore only power down an empty virtual
+ * group; its physical groups will be powered up again in the
+ * following pm_runtime_resume callback.
+ */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_DISABLED) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_EMPTY));
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_down(groups[i]);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+ mali_executor_unlock();
+}
+
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *tmp_group;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3,
+ ("Executor: Aborting all jobs from session 0x%08X.\n",
+ session));
+
+ mali_executor_lock();
+
+ if (mali_group_get_session(gp_group) == session) {
+ if (EXEC_STATE_WORKING == gp_group_state) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_FALSE, MALI_TRUE);
+ } else {
+ /* Same session, but not working, so just clear it */
+ mali_group_clear_session(gp_group);
+ }
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (EXEC_STATE_WORKING == virtual_group_state
+ && mali_group_get_session(virtual_group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+ struct mali_group, executor_list) {
+ if (mali_group_get_session(group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+ /* Note: core scaling is enabled by default */
+ core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+ core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+ return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_enable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, disable it immediately; if the
+ * group is part of the virtual group and the virtual group is idle, disable
+ * the given physical group within it.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_disable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+ /* NB: This function is not optimized for time critical usage */
+
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+ ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+ mali_executor_unlock();
+
+ return ret;
+}
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == num_physical_pp_cores_enabled) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_physical_pp_cores_total) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ mali_executor_core_scale(target_core_nr);
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+ return 0;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ mali_executor_lock();
+
+ switch (gp_group_state) {
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in unknown/illegal state %u\n",
+ gp_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in WORKING state (count = %u):\n",
+ group_list_working_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in IDLE state (count = %u):\n",
+ group_list_idle_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in INACTIVE state (count = %u):\n",
+ group_list_inactive_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in DISABLED state (count = %u):\n",
+ group_list_disabled_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ switch (virtual_group_state) {
+ case EXEC_STATE_EMPTY:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state EMPTY\n");
+ break;
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in unknown/illegal state %u\n",
+ virtual_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(virtual_group, buf + n, size - n);
+ }
+
+ mali_executor_unlock();
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_total_cores = num_physical_pp_cores_total;
+ args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ _mali_osk_notification_t *new_notification = NULL;
+
+ new_notification = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL != new_notification) {
+ MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+ args->cookie, args->arguments[0], args->arguments[1]));
+
+ mali_executor_lock();
+
+ /* Resume the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /*
+ * Correct job is running, resume with new heap
+ */
+
+ mali_gp_job_set_oom_notification(job,
+ new_notification);
+
+ /* This will also re-enable interrupts */
+ mali_group_resume_gp_with_new_heap(gp_group,
+ args->cookie,
+ args->arguments[0],
+ args->arguments[1]);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Executor: Unable to resume gp job becasue gp time out or any other unexpected reason!\n"));
+
+ _mali_osk_notification_delete(new_notification);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+ }
+
+ mali_executor_lock();
+
+ /* Abort the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /* Correct job is still running */
+ struct mali_gp_job *job_done = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL);
+
+ /* The same job should have completed */
+ MALI_DEBUG_ASSERT(job_done == job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+void mali_executor_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+void mali_executor_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+ _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
+static mali_bool mali_executor_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ ret = pause_count > 0 && !mali_executor_is_working();
+
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static mali_bool mali_executor_is_working(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return (0 != group_list_working_count ||
+ EXEC_STATE_WORKING == gp_group_state ||
+ EXEC_STATE_WORKING == virtual_group_state);
+}
+
+static void mali_executor_disable_empty_virtual(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+ if (mali_group_is_empty(virtual_group)) {
+ virtual_group_state = EXEC_STATE_EMPTY;
+ }
+}
+
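+/*
+ * Move an idle physical group back into the virtual group, first syncing its
+ * activation state with the virtual group's. Returns MALI_TRUE if the caller
+ * must trigger a PM update (i.e. the group was deactivated here).
+ */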
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ /* Only rejoining after job has completed (still active) */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+ /* Make sure group and virtual group have same status */
+
+ if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Activation is pending for virtual group, leave
+ * this child group as active.
+ */
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(virtual_group));
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ }
+ }
+
+ /* Remove group from idle list */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+ EXEC_STATE_IDLE));
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_idle_count--;
+
+ /*
+ * And finally rejoin the virtual group
+ * group will start working on same job as virtual_group,
+ * if virtual_group is working on a job
+ */
+ mali_group_add_group(virtual_group, group);
+
+ return trigger_pm_update;
+}
+
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return ((EXEC_STATE_INACTIVE == virtual_group_state ||
+ EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
+ MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
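+/*
+ * Used when the GP-bound hint is enabled: returns MALI_TRUE if the next
+ * physical PP job is large and not yet started while other physical groups
+ * are still working (Mali-400 only), so the caller should hold it back.
+ */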
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ job = mali_scheduler_job_pp_physical_peek();
+
+ if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+ if (0 < group_list_working_count &&
+ mali_pp_job_is_large_and_unstarted(job)) {
+ return MALI_TRUE;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
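+/*
+ * Decide whether this schedule pass must stop early because of GPU secure
+ * (protected) mode constraints: protected and non-protected jobs must never
+ * run at the same time. Also reports, via the out-parameter, whether secure
+ * mode is needed for the next PP job.
+ */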
+static mali_bool mali_executor_schedule_is_early_out(mali_bool *gpu_secure_mode_is_needed)
+{
+ struct mali_pp_job *next_pp_job_to_start = NULL;
+ struct mali_group *group;
+ struct mali_group *tmp_group;
+ struct mali_pp_job *physical_pp_job_working = NULL;
+ struct mali_pp_job *virtual_pp_job_working = NULL;
+ mali_bool gpu_working_in_protected_mode = MALI_FALSE;
+ mali_bool gpu_working_in_non_protected_mode = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ *gpu_secure_mode_is_needed = MALI_FALSE;
+
+	/* Check if GPU secure mode is supported; exit if not. */
+ if (MALI_FALSE == _mali_osk_gpu_secure_mode_is_supported()) {
+ return MALI_FALSE;
+ }
+
+	/* Check whether GPU secure mode needs to be set for the next PP job;
+	 * get the next PP job that will be scheduled, if any.
+	 */
+ next_pp_job_to_start = mali_scheduler_job_pp_next();
+
+	/* Check whether the currently running physical/virtual PP job, if any, is a protected job. */
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+ struct mali_group, executor_list) {
+ physical_pp_job_working = group->pp_running_job;
+ break;
+ }
+
+ if (EXEC_STATE_WORKING == virtual_group_state) {
+ virtual_pp_job_working = virtual_group->pp_running_job;
+ }
+
+ if (NULL != physical_pp_job_working) {
+ if (MALI_TRUE == mali_pp_job_is_protected_job(physical_pp_job_working)) {
+ gpu_working_in_protected_mode = MALI_TRUE;
+ } else {
+ gpu_working_in_non_protected_mode = MALI_TRUE;
+ }
+ } else if (NULL != virtual_pp_job_working) {
+ if (MALI_TRUE == mali_pp_job_is_protected_job(virtual_pp_job_working)) {
+ gpu_working_in_protected_mode = MALI_TRUE;
+ } else {
+ gpu_working_in_non_protected_mode = MALI_TRUE;
+ }
+ } else if (EXEC_STATE_WORKING == gp_group_state) {
+ gpu_working_in_non_protected_mode = MALI_TRUE;
+ }
+
+	/* If the next PP job is a protected job. */
+ if ((NULL != next_pp_job_to_start) && MALI_TRUE == mali_pp_job_is_protected_job(next_pp_job_to_start)) {
+		/* If GP or any non-protected PP job is running now, a protected PP job cannot be scheduled. */
+ if (MALI_TRUE == gpu_working_in_non_protected_mode)
+ return MALI_TRUE;
+
+ *gpu_secure_mode_is_needed = MALI_TRUE;
+ return MALI_FALSE;
+
+ }
+
+ if (MALI_TRUE == gpu_working_in_protected_mode) {
+		/* Non-protected PP/GP jobs cannot be scheduled while protected PP jobs are running. */
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+/*
+ * This is where jobs are actually started.
+ */
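+/*
+ * Groups are prepared and jobs are dequeued while both the executor and
+ * scheduler locks are held; the actual job starts happen only after the
+ * scheduler lock has been dropped, and any pending PM update is triggered last.
+ */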
+static void mali_executor_schedule(void)
+{
+ u32 i;
+ u32 num_physical_needed = 0;
+ u32 num_physical_to_process = 0;
+ mali_bool trigger_pm_update = MALI_FALSE;
+ mali_bool deactivate_idle_group = MALI_TRUE;
+ mali_bool gpu_secure_mode_is_needed = MALI_FALSE;
+ mali_bool is_gpu_secure_mode = MALI_FALSE;
+ /* Physical groups + jobs to start in this function */
+ struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ int num_jobs_to_start = 0;
+
+ /* Virtual job to start in this function */
+ struct mali_pp_job *virtual_job_to_start = NULL;
+
+ /* GP job to start in this function */
+ struct mali_gp_job *gp_job_to_start = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (pause_count > 0) {
+ /* Execution is suspended, don't schedule any jobs. */
+ return;
+ }
+
+ /* Lock needed in order to safely handle the job queues */
+ mali_scheduler_lock();
+
+	/* 1. Check whether this schedule pass should exit early. */
+ if (MALI_TRUE == mali_executor_schedule_is_early_out(&gpu_secure_mode_is_needed)) {
+ mali_scheduler_unlock();
+ return;
+ }
+
+	/* 2. Activate the GP group first if a GP job is queued. */
+ if ((EXEC_STATE_INACTIVE == gp_group_state)
+ && (0 < mali_scheduler_job_gp_count())
+ && (gpu_secure_mode_is_needed == MALI_FALSE)) {
+
+ enum mali_group_state state =
+ mali_group_activate(gp_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set GP group state to idle */
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 3. Prepare as many physical groups as needed/possible */
+
+ num_physical_needed = mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed);
+
+	/* On the Mali-450 platform we do not need to enter this block frequently. */
+ if (0 < num_physical_needed) {
+
+ if (num_physical_needed <= group_list_idle_count) {
+ /* We have enough groups on idle list already */
+ num_physical_to_process = num_physical_needed;
+ num_physical_needed = 0;
+ } else {
+ /* We need to get a hold of some more groups */
+ num_physical_to_process = group_list_idle_count;
+ num_physical_needed -= group_list_idle_count;
+ }
+
+ if (0 < num_physical_needed) {
+
+ /* 3.1. Activate groups which are inactive */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+ struct mali_group, executor_list) {
+ enum mali_group_state state =
+ mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Move from inactive to idle */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ num_physical_to_process++;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ num_physical_needed--;
+ if (0 == num_physical_needed) {
+ /* We have activated all the groups we need */
+ break;
+ }
+ }
+ }
+
+ if (mali_executor_virtual_group_is_usable()) {
+
+ /*
+ * 3.2. And finally, steal and activate groups
+ * from virtual group if we need even more
+ */
+ while (0 < num_physical_needed) {
+ struct mali_group *group;
+
+ group = mali_group_acquire_group(virtual_group);
+ if (NULL != group) {
+ enum mali_group_state state;
+
+ mali_executor_disable_empty_virtual();
+
+ state = mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Group is ready, add to idle list */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_idle);
+ group_list_idle_count++;
+ num_physical_to_process++;
+ } else {
+ /*
+ * Group is not ready yet,
+ * add to inactive list
+ */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_inactive);
+ group_list_inactive_count++;
+
+ trigger_pm_update = MALI_TRUE;
+ }
+ num_physical_needed--;
+ } else {
+ /*
+ * We could not get enough groups
+ * from the virtual group.
+ */
+ break;
+ }
+ }
+ }
+
+ /* 3.3. Assign physical jobs to groups */
+
+ if (0 < num_physical_to_process) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+ struct mali_group, executor_list) {
+ struct mali_pp_job *job = NULL;
+ u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+ MALI_DEBUG_ASSERT(num_jobs_to_start <
+ MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ MALI_DEBUG_ASSERT(0 <
+ mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed));
+
+				/* If the next PP job is non-protected, check whether we are GP bound now. */
+ if ((MALI_FALSE == gpu_secure_mode_is_needed)
+ && (mali_executor_hint_is_enabled(MALI_EXECUTOR_HINT_GP_BOUND))
+ && (MALI_TRUE == mali_executor_tackle_gp_bound())) {
+ /*
+ * We're gp bound,
+ * don't start this right now.
+ */
+ deactivate_idle_group = MALI_FALSE;
+ num_physical_to_process = 0;
+ break;
+ }
+
+ job = mali_scheduler_job_pp_physical_get(
+ &sub_job);
+
+ if (MALI_FALSE == gpu_secure_mode_is_needed) {
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_protected_job(job));
+ } else {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_protected_job(job));
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ /* Put job + group on list of jobs to start later on */
+
+ groups_to_start[num_jobs_to_start] = group;
+ jobs_to_start[num_jobs_to_start] = job;
+ sub_jobs_to_start[num_jobs_to_start] = sub_job;
+ num_jobs_to_start++;
+
+ /* Move group from idle to working */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_working,
+ &group_list_working_count);
+
+ num_physical_to_process--;
+ if (0 == num_physical_to_process) {
+ /* Got all we needed */
+ break;
+ }
+ }
+ }
+ }
+
+	/* 4. Deactivate idle PP groups. This must be done before activating the
+	 * virtual group, to cover the case where the normal queue initially holds
+	 * only a physical job while the group is inactive: the job start is then
+	 * delayed until the group has been activated and the scheduler runs again,
+	 * and if a virtual job has arrived in the high-priority queue by that time,
+	 * this schedule pass would otherwise do nothing because executor scheduling
+	 * has stopped.
+	 */
+
+ if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+ && (!mali_timeline_has_physical_pp_job()))) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* 5. Activate virtual group, if needed */
+ if (EXEC_STATE_INACTIVE == virtual_group_state &&
+ MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+ struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+ if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+ || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+ enum mali_group_state state =
+ mali_group_activate(virtual_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set virtual group state to idle */
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+	/* 6. To power up groups as soon as possible, trigger the PM update only when no GPU mode switch is needed. */
+
+ is_gpu_secure_mode = _mali_osk_gpu_secure_mode_is_enabled();
+
+ if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == is_gpu_secure_mode)
+ || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == is_gpu_secure_mode)) {
+ if (MALI_TRUE == trigger_pm_update) {
+ trigger_pm_update = MALI_FALSE;
+ mali_pm_update_async();
+ }
+ }
+
+ /* 7. Assign jobs to idle virtual group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == virtual_group_state) {
+ if (MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+ struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+ if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+ || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+ virtual_job_to_start =
+ mali_scheduler_job_pp_virtual_get();
+ virtual_group_state = EXEC_STATE_WORKING;
+ }
+ } else if (!mali_timeline_has_virtual_pp_job()) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+
+ if (mali_group_deactivate(virtual_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 8. Assign job to idle GP group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == gp_group_state && MALI_FALSE == gpu_secure_mode_is_needed) {
+ if (0 < mali_scheduler_job_gp_count()) {
+ gp_job_to_start = mali_scheduler_job_gp_get();
+ gp_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_gp_job()) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ if (mali_group_deactivate(gp_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 9. We no longer need the schedule/queue lock */
+
+ mali_scheduler_unlock();
+
+ /* 10. start jobs */
+ if (NULL != virtual_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+ mali_group_start_pp_job(virtual_group,
+ virtual_job_to_start, 0, is_gpu_secure_mode);
+ }
+
+ for (i = 0; i < num_jobs_to_start; i++) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+ groups_to_start[i]));
+ mali_group_start_pp_job(groups_to_start[i],
+ jobs_to_start[i],
+ sub_jobs_to_start[i], is_gpu_secure_mode);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+ if (NULL != gp_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+ mali_group_start_gp_job(gp_group, gp_job_to_start, is_gpu_secure_mode);
+ }
+
+ /* 11. Trigger any pending PM updates */
+ if (MALI_TRUE == trigger_pm_update) {
+ mali_pm_update_async();
+ }
+}
+
+/* Handler for deferred schedule requests */
+static void mali_executor_wq_schedule(void *arg)
+{
+ MALI_IGNORE(arg);
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ notification = mali_gp_job_get_oom_notification(job);
+
+ /*
+ * Remember the id we send to user space, so we have something to
+ * verify when we get a response
+ */
+ gp_returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = gp_returned_cookie;
+
+ mali_session_send_notification(mali_gp_job_get_session(job),
+ notification);
+}
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+ mali_bool success)
+{
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	/* Extract the needed HW status from the core and reset it */
+ job = mali_group_complete_gp(group, success);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ gp_group_state = EXEC_STATE_IDLE;
+
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job */
+ mali_gp_job_signal_pp_tracker(job, success);
+
+ return job;
+}
+
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+ mali_bool success)
+{
+ struct mali_pp_job *job;
+ u32 sub_job;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	/* Extract the needed HW status from the core and reset it */
+ job = mali_group_complete_pp(group, success, &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ if (mali_group_is_virtual(group)) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ /* Move from working to idle state */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_working,
+ &group_list_working_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ /* It is the executor module which owns the jobs themselves by now */
+ mali_pp_job_mark_sub_job_completed(job, success);
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ return job;
+}
+
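+/*
+ * Common completion path for a group: the finished GP or PP job is extracted
+ * from the core and the group is returned to the idle state, suspended waiters
+ * are woken if execution is paused and nothing is working any more, a pending
+ * disable request is honoured, new work is scheduled, and the completed GP job
+ * (or a fully completed PP job) is handed back through the out-parameters.
+ */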
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done)
+{
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ mali_bool pp_job_is_done = MALI_TRUE;
+
+ if (NULL != gp_core) {
+ gp_job = mali_executor_complete_gp(group, success);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(pp_core);
+ MALI_IGNORE(pp_core);
+ pp_job = mali_executor_complete_pp(group, success);
+
+ pp_job_is_done = mali_pp_job_is_complete(pp_job);
+ }
+
+ if (pause_count > 0) {
+ /* Execution has been suspended */
+
+ if (!mali_executor_is_working()) {
+ /* Last job completed, wake up sleepers */
+ _mali_osk_wait_queue_wake_up(
+ executor_working_wait_queue);
+ }
+ } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+ mali_executor_core_scale_in_group_complete(group);
+
+ mali_executor_schedule();
+ } else {
+ /* try to schedule new jobs */
+ mali_executor_schedule();
+ }
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+ *gp_job_done = gp_job;
+ } else if (pp_job_is_done) {
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+ *pp_job_done = pp_job;
+ }
+}
+
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ /*
+ * It's a bit more complicated to change the state for the physical PP
+ * groups since their state is determined by the list they are on.
+ */
+#if defined(DEBUG)
+ mali_bool found = MALI_FALSE;
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ u32 old_counted = 0;
+ u32 new_counted = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(old_list);
+ MALI_DEBUG_ASSERT_POINTER(old_count);
+ MALI_DEBUG_ASSERT_POINTER(new_list);
+ MALI_DEBUG_ASSERT_POINTER(new_count);
+
+ /*
+ * Verify that group is present on old list,
+ * and that the count is correct
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+ struct mali_group, executor_list) {
+ old_counted++;
+ if (group == group_iter) {
+ found = MALI_TRUE;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+ struct mali_group, executor_list) {
+ new_counted++;
+ }
+
+ if (MALI_FALSE == found) {
+ if (old_list == &group_list_idle) {
+ MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+ } else if (old_list == &group_list_inactive) {
+ MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+ } else if (old_list == &group_list_working) {
+ MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+ } else if (old_list == &group_list_disabled) {
+ MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
+ }
+
+ if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+ MALI_DEBUG_PRINT(1, (" group in working \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+ MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+ MALI_DEBUG_PRINT(1, (" group in idle \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+ MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+ }
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == found);
+ MALI_DEBUG_ASSERT(0 < (*old_count));
+ MALI_DEBUG_ASSERT((*old_count) == old_counted);
+ MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+ _mali_osk_list_move(&group->executor_list, new_list);
+ (*old_count)--;
+ (*new_count)++;
+}
+
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ _mali_osk_list_add(&group->executor_list, new_list);
+ (*new_count)++;
+}
+
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (gp_group == group) {
+ if (gp_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+ if (virtual_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else {
+ /* Physical PP group */
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ _mali_osk_list_t *list;
+
+ if (EXEC_STATE_DISABLED == state) {
+ list = &group_list_disabled;
+ } else if (EXEC_STATE_INACTIVE == state) {
+ list = &group_list_inactive;
+ } else if (EXEC_STATE_IDLE == state) {
+ list = &group_list_idle;
+ } else {
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+ list = &group_list_working;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+ struct mali_group, executor_list) {
+ if (group_iter == group) {
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ /* group not in correct state */
+ return MALI_FALSE;
+}
+
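+/*
+ * Bring a disabled group back into service: it is moved to the inactive state
+ * first and then activated; if activation completes immediately the group is
+ * moved to idle (and a physical group rejoins the virtual group on
+ * Mali-450/470), otherwise an asynchronous PM update is triggered.
+ */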
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ /* Put into inactive state (== "lowest" enabled state) */
+ if (group == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+ gp_group_state = EXEC_STATE_INACTIVE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+
+ ++num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+ /* Move from inactive to idle */
+ if (group == gp_group) {
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+
+ if (mali_executor_has_virtual_group()) {
+ if (mali_executor_physical_rejoin_virtual(group)) {
+ mali_pm_update_async();
+ }
+ }
+ }
+ } else {
+ mali_pm_update_async();
+ }
+}
+
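+/*
+ * Take a group out of service: if it is currently working it is only flagged,
+ * so that it is disabled once its job completes; otherwise it is moved to the
+ * disabled state (detaching it from the virtual group if it is a child of
+ * one) and deactivated if necessary.
+ */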
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+ mali_bool working;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+ if (MALI_TRUE == working) {
+		/* Group will be disabled once it completes its current work;
+		 * when the virtual group completes, its child groups are also checked for this flag */
+ mali_group_set_disable_request(group, MALI_TRUE);
+ return;
+ }
+
+ /* Put into disabled state */
+ if (group == gp_group) {
+ /* GP group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+ gp_group_state = EXEC_STATE_DISABLED;
+ } else {
+ if (mali_group_is_in_virtual(group)) {
+		if (mali_group_is_in_virtual(group)) {
+			/* A child group of the virtual group; remove this group from the virtual group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ mali_executor_set_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count);
+
+ mali_group_remove_group(virtual_group, group);
+ mali_executor_disable_empty_virtual();
+ } else {
+ mali_executor_change_group_status_disabled(group);
+ }
+
+ --num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_INACTIVE != group->state) {
+ if (MALI_TRUE == mali_group_deactivate(group)) {
+ mali_pm_update_async();
+ }
+ }
+}
+
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+ mali_bool done = MALI_FALSE;
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ return;
+ }
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+		/* Pre-allocate the number of notification objects we need right now (it might change once the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+			/* There is probably no point in trying again; the system must be very low on memory and probably unusable by now anyway */
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+ u32 i;
+ u32 num_groups;
+ mali_bool ret = MALI_TRUE;
+
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+ ret = MALI_FALSE;
+ break;
+ }
+ }
+ }
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ return;
+ }
+
+ _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+ mali_executor_core_scaling_is_done, NULL);
+
+ mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+ u32 i;
+ u32 num_groups;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ group->disable_requested = MALI_FALSE;
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ core_scaling_delay_up_mask[i] = 0;
+ }
+}
+
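+/*
+ * Scale the number of enabled physical PP cores to target_core_nr: the best
+ * power-cost domain masks for the current and target counts are compared,
+ * cores are disabled in domains with a negative delta and enabled in domains
+ * with a positive delta, and any enables that cannot be completed yet
+ * (because cores to be disabled are still working) are deferred via
+ * core_scaling_delay_up_mask.
+ */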
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+ int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int i;
+
+ MALI_DEBUG_ASSERT(0 < target_core_nr);
+ MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+ mali_executor_lock();
+
+ if (target_core_nr < num_physical_pp_cores_enabled) {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+ }
+
+	/* When a new core scaling request comes in, remove the pending (not yet
+	 * applied) part of the previous request. This is safe because everything
+	 * is protected by a single lock (the executor lock). */
+ mali_executor_core_scaling_reset();
+
+ mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+ mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+ MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 > target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_disable_internal(group);
+ target_core_scaling_mask[i]++;
+ if ((0 == target_core_scaling_mask[i])) {
+ break;
+ }
+
+ }
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+		/*
+		 * target_core_scaling_mask[i] greater than 0 means we need to
+		 * enable some PP cores in the domain with index i.
+		 */
+ if (0 < target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (num_physical_pp_cores_enabled >= target_core_nr) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_enable_internal(group);
+ target_core_scaling_mask[i]--;
+
+ if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+	/*
+	 * Some PP cores may still not have been enabled at this point, because
+	 * cores that need to be disabled first are still in the working state.
+	 */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < target_core_scaling_mask[i]) {
+ core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+ }
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
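+/*
+ * Called when a group with a pending disable request has completed its work:
+ * the group (or the flagged children of a virtual group) is disabled, and an
+ * equal number of cores from the deferred enable mask
+ * (core_scaling_delay_up_mask) is brought back up before waiters on the
+ * core-change queue are woken.
+ */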
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+ int num_pp_cores_disabled = 0;
+ int num_pp_cores_to_enable = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+	/* Disable the flagged child groups of the virtual group */
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (MALI_TRUE == mali_group_disable_requested(child)) {
+ mali_group_set_disable_request(child, MALI_FALSE);
+ mali_executor_group_disable_internal(child);
+ num_pp_cores_disabled++;
+ }
+ }
+ mali_group_set_disable_request(group, MALI_FALSE);
+ } else {
+ mali_executor_group_disable_internal(group);
+ mali_group_set_disable_request(group, MALI_FALSE);
+ if (NULL != mali_group_get_pp_core(group)) {
+ num_pp_cores_disabled++;
+ }
+ }
+
+ num_pp_cores_to_enable = num_pp_cores_disabled;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < core_scaling_delay_up_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (0 == num_pp_cores_to_enable) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *disabled_group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+ mali_executor_group_enable_internal(disabled_group);
+ core_scaling_delay_up_mask[i]--;
+ num_pp_cores_to_enable--;
+
+ if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+ /* Physical PP group */
+ mali_bool idle;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+ if (MALI_TRUE == idle) {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ }
+}
+
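+/*
+ * Handle the groups on the idle list: on Mali-450/470 (where a virtual group
+ * exists) they rejoin the virtual group, while on Mali-300/400 they are
+ * deactivated and moved to the inactive list when the caller allows it.
+ * Returns MALI_TRUE if the caller must trigger a PM update.
+ */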
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ if (group_list_idle_count > 0) {
+ if (mali_executor_has_virtual_group()) {
+
+			/* Rejoin the virtual group on Mali-450/470 */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_executor_physical_rejoin_virtual(
+ group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ } else if (deactivate_idle_group) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ /* Deactivate group on Mali-300/400 */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* Move from idle to inactive */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+ }
+ }
+ }
+
+ return trigger_pm_update;
+}
+
+void mali_executor_running_status_print(void)
+{
+ struct mali_group *group = NULL;
+ struct mali_group *temp = NULL;
+
+ MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job));
+ if ((gp_group->gp_core) && (gp_group->is_working)) {
+ mali_group_dump_status(gp_group);
+ }
+ MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job));
+ mali_group_dump_status(group);
+ }
+ MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+ MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+ MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job));
+ MALI_PRINT(("Virtual group status: %d\n", virtual_group_state));
+ MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state));
+ MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off"));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list,
+ struct mali_group, group_list) {
+ int i = 0;
+ MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job));
+ MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state));
+ MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off"));
+ if (group->pm_domain) {
+ MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain)));
+ MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain)));
+ MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain)));
+ MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off"));
+ MALI_PRINT(("\tWanted power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off"));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]);
+ MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off"));
+ if (domain) {
+ MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain)));
+ MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain)));
+ MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain)));
+ MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
+ MALI_PRINT(("\tL2 Wanted power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
+ }
+ }
+ }
+ }
+ if (EXEC_STATE_WORKING == virtual_group_state) {
+ mali_group_dump_status(virtual_group);
+ }
+ }
+}
+
+void mali_executor_status_dump(void)
+{
+ mali_executor_lock();
+ mali_scheduler_lock();
+
+ /* print schedule queue status */
+ mali_scheduler_gp_pp_job_queue_print();
+
+ mali_scheduler_unlock();
+ mali_executor_unlock();
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_executor.h b/drivers/gpu/arm/utgard/common/mali_executor.h
new file mode 100644
index 000000000000..4224d6a6cdc4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_executor.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012, 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+ MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX 1
+} mali_executor_hint;
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ return mali_executor_hints[hint];
+}
+
+void mali_executor_running_status_print(void);
+void mali_executor_status_dump(void);
+void mali_executor_lock(void);
+void mali_executor_unlock(void);
+#endif /* __MALI_EXECUTOR_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_gp.c b/drivers/gpu/arm/utgard/common/mali_gp.c
new file mode 100644
index 000000000000..7d3d4aff7c3f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+ struct mali_gp_core *core = NULL;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+ MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_gp_reset(core);
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_gp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_gp,
+ group,
+ mali_gp_irq_probe_trigger,
+ mali_gp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+ mali_global_gp_core = core;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_gp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_gp_delete(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+ mali_global_gp_core = NULL;
+ _mali_osk_free(core);
+}
+
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_gp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_SLOW == i) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
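+/*
+ * Hard reset: a scratch register (the performance counter 0 limit register)
+ * is first written with a marker value and the reset command is issued; the
+ * reset is considered complete once a different check value written to that
+ * register can be read back. Interrupts are re-enabled afterwards.
+ */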
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+ mali_gp_reset_async(core);
+ return mali_gp_reset_wait(core);
+}
+
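+/*
+ * Start a GP job: the start command is built from the job's VS/PLBU parts,
+ * the frame registers and any performance counter sources are programmed,
+ * and the command register writes that kick the core are fenced with write
+ * memory barriers.
+ */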
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 startcmd = 0;
+ u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ if (mali_gp_job_has_vs_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if (mali_gp_job_has_plbu_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ MALI_DEBUG_ASSERT(0 != startcmd);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+	 * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set; just
+	 * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+ {
+ u32 bits = 0;
+
+ if (mali_gp_job_has_vs_job(job))
+ bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ if (mali_gp_job_has_plbu_job(job))
+ bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+ mali_hw_core_register_write_relaxed(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+ }
+#endif
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+}
+
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+ u32 irq_readout;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+ if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+ }
+ /*
+ * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+ * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+ */
+}
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+ return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+ return n;
+}
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+ _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0);
+#endif
+
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+ _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1);
+#endif
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_gp.h b/drivers/gpu/arm/utgard/common/mali_gp.h
new file mode 100644
index 000000000000..3156310f21c7
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
+
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
+{
+ return core->hw_core.description;
+}
+
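+/*
+ * Decode the masked interrupt status into a coarse result: both end-of-command-
+ * list bits set means the whole job finished, a single bit means only the VS or
+ * the PLBU part finished, the out-of-memory bit means the PLBU heap must be
+ * grown before the job can continue, and anything else is treated as an error.
+ */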
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
+{
+ u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+ MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ if (0 == stat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+ return MALI_INTERRUPT_RESULT_OOM;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core)
+{
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
+{
+ /* Enable all interrupts, except those specified in exceptions */
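+	/* (Only SUCCESS_VS or SUCCESS_PLBU are expected here: one of the two GP
+	 * units has already completed, so its end-of-command-list interrupt stays
+	 * masked while the other unit keeps running.) */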
+ u32 value;
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+ /* Enable all used except VS complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ } else {
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+ exceptions);
+ /* Enable all used except PLBU complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ }
+
+ mali_hw_core_register_write(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK,
+ value);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.c b/drivers/gpu/arm/utgard/common/mali_gp_job.c
new file mode 100644
index 000000000000..8b45179b01ed
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp_job.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_defer_bind.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job);
+
+
+static int _mali_gp_add_varying_allocations(struct mali_session_data *session,
+ struct mali_gp_job *job,
+ u32 *alloc,
+ u32 num)
+{
+ int i = 0;
+ struct mali_gp_allocation_node *alloc_node;
+ mali_mem_allocation *mali_alloc = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ for (i = 0 ; i < num ; i++) {
+ MALI_DEBUG_ASSERT(alloc[i]);
+ alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node));
+ if (alloc_node) {
+ INIT_LIST_HEAD(&alloc_node->node);
+			/* Find the mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0);
+
+ if (likely(mali_vma_node)) {
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
+ } else {
+ MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,can't find allocation %d by address =0x%x, num=%d\n", i, alloc[i], num));
+ _mali_osk_free(alloc_node);
+ goto fail;
+ }
+ alloc_node->alloc = mali_alloc;
+			/* Add to the GP job varying allocation list */
+ list_move(&alloc_node->node, &job->varying_alloc);
+ } else
+ goto fail;
+ }
+
+ return 0;
+fail:
+ MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,failed to alloc memory!\n"));
+ _mali_gp_del_varying_allocations(job);
+ return -1;
+}
+
+
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job)
+{
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ list_del(&alloc_node->node);
+ kfree(alloc_node);
+ }
+ INIT_LIST_HEAD(&job->varying_alloc);
+}
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+ struct mali_gp_job *job;
+ u32 perf_counter_flag;
+ u32 __user *memory_list = NULL;
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_gp_job));
+ if (NULL != job) {
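+		/*
+		 * Pre-allocate the finished and OOM notifications up front so that
+		 * the completion and out-of-memory paths never have to allocate
+		 * memory once the job is running.
+		 */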
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL == job->finished_notification) {
+ goto fail3;
+ }
+
+ job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+ if (NULL == job->oom_notification) {
+ goto fail2;
+ }
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+ goto fail1;
+ }
+
+ perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+		/* No counters were specified by user space,
+		 * so pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+ mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ job->heap_current_addr = job->uargs.frame_registers[4];
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+
+ INIT_LIST_HEAD(&job->varying_alloc);
+ INIT_LIST_HEAD(&job->vary_todo);
+ job->dmem = NULL;
+
+ if (job->uargs.deferred_mem_num > session->allocation_mgr.mali_allocation_num) {
+ MALI_PRINT_ERROR(("Mali GP job: The number of varying buffer to defer bind is invalid !\n"));
+ goto fail1;
+ }
+
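+		/*
+		 * Defer-bind path: copy the list of varying allocations from user
+		 * space, resolve each address to its kernel allocation object,
+		 * prepare the backend bind list, and then pre-bind the physical
+		 * memory so the job does not stall on allocation latency once it
+		 * is running.
+		 */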
+		/* Add the varying allocation list */
+ if (job->uargs.deferred_mem_num > 0) {
+ /* copy varying list from user space*/
+ job->varying_list = _mali_osk_calloc(1, sizeof(u32) * job->uargs.deferred_mem_num);
+ if (!job->varying_list) {
+ MALI_PRINT_ERROR(("Mali GP job: allocate varying_list failed varying_alloc_num = %d !\n", job->uargs.deferred_mem_num));
+ goto fail1;
+ }
+
+ memory_list = (u32 __user *)(uintptr_t)job->uargs.deferred_mem_list;
+
+ if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32) * job->uargs.deferred_mem_num)) {
+ MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n"));
+ goto fail;
+ }
+
+ if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list,
+ job->uargs.deferred_mem_num))) {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n"));
+ goto fail;
+ }
+
+			/* Do the preparation for each allocation */
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo, &job->required_varying_memsize))) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n"));
+ goto fail;
+ }
+ }
+
+ _mali_gp_del_varying_allocations(job);
+
+			/* Bind the varying memory here, to avoid memory latency issues later. */
+ {
+ struct mali_defer_mem_block dmem_block;
+
+ INIT_LIST_HEAD(&dmem_block.free_pages);
+ atomic_set(&dmem_block.num_free_pages, 0);
+
+ if (mali_mem_prepare_mem_for_job(job, &dmem_block)) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n"));
+ goto fail;
+ }
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job, &dmem_block)) {
+ MALI_PRINT_ERROR(("gp job create, mali_mem_defer_bind failed! GP %x fail!", job));
+ goto fail;
+ }
+ }
+
+ if (job->uargs.varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
+ job->big_job = 1;
+ }
+ }
+ job->pp_tracker = pp_tracker;
+ if (NULL != job->pp_tracker) {
+ /* Take a reference on PP job's tracker that will be released when the GP
+ job is done. */
+ mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n"));
+ return NULL;
+ }
+
+
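+/*
+ * Error handling: each label below undoes the allocations made before the
+ * failure point (first the varying list and pending bind list, then the OOM
+ * notification, then the finished notification, and finally the job object
+ * itself).
+ */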
+fail:
+ _mali_osk_free(job->varying_list);
+	/* Handle allocation failure here, free all varying nodes */
+ {
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+ }
+fail1:
+ _mali_osk_notification_delete(job->oom_notification);
+fail2:
+ _mali_osk_notification_delete(job->finished_notification);
+fail3:
+ _mali_osk_free(job);
+ return NULL;
+}
+
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ _mali_osk_free(job->varying_list);
+
+	/* Free all remaining varying nodes */
+ list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+
+ mali_mem_defer_dmem_free(job);
+
+	/* De-allocate the pre-allocated OOM and finished notifications */
+ if (NULL != job->oom_notification) {
+ _mali_osk_notification_delete(job->oom_notification);
+ job->oom_notification = NULL;
+ }
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+ _mali_osk_free(job);
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_gp_job *iter;
+ struct mali_gp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_gp_job, list) {
+
+ /* A span is used to handle job ID wrapping. */
+ bool job_is_after = (mali_gp_job_get_id(job) -
+ mali_gp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN;
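+		/*
+		 * Worked example (unsigned 32-bit arithmetic, assuming the span is
+		 * much larger than a handful of jobs): if the new job has id
+		 * 0x00000002 and the iterator id is 0xfffffffe, the subtraction
+		 * wraps around to 4, so the new job is still treated as coming
+		 * after the iterator even though its raw id value is smaller.
+		 */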
+
+ if (job_is_after) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+ return gp_counter_src0;
+}
+
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+ gp_counter_src0 = counter;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+ return gp_counter_src1;
+}
+
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+ gp_counter_src1 = counter;
+}
+
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (NULL != job->pp_tracker) {
+ schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+ job->pp_tracker = NULL;
+ }
+
+ return schedule_mask;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.h b/drivers/gpu/arm/utgard/common/mali_gp_job.h
new file mode 100644
index 000000000000..b84333f9f810
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp_job.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
+
+struct mali_defer_mem;
+/**
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems through-out
+ * its lifetime. Different part of the GP job struct is used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_gp_job {
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+
+ /*
+ * These members are used by the executor and/or group,
+ * protected by executor lock
+ */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+	struct mali_defer_mem *dmem;          /**< Used by defer bind to store dmem info */
+	struct list_head varying_alloc;       /**< Holds the list of varying allocations */
+	u32 bind_flag;                        /**< Flag for defer bind */
+	u32 *varying_list;                    /**< Varying memory list that needs defer binding */
+	struct list_head vary_todo;           /**< List of backends that need defer binding */
+	u32 required_varying_memsize;         /**< Size of varying memory to reallocate */
+	u32 big_job;                          /**< Set if the GP job has large varying output and may take a long time */
+};
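+
+/*
+ * Typical lifetime (sketch): mali_gp_job_create() builds the job from the
+ * user-space arguments, the scheduler queues it with mali_gp_job_list_add()
+ * under the scheduler lock, the executor starts it on the GP core and fills
+ * in the heap address and performance counter values under the executor
+ * lock, and mali_gp_job_delete() releases it once the result has been
+ * returned to user space.
+ */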
+
+#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0)
+#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2)
+
+struct mali_gp_allocation_node {
+ struct list_head node;
+ mali_mem_allocation *alloc;
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value1 = value;
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+ struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+ struct mali_gp_job *job,
+ _mali_osk_notification_t *notification)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+ job->oom_notification = notification;
+}
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_group.c b/drivers/gpu/arm/utgard/common/mali_group.c
new file mode 100644
index 000000000000..5c7b3f46c949
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_group.c
@@ -0,0 +1,1865 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_osk_mali.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#include "mali_executor.h"
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* SW timer for job execution */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload);
+static void mali_group_recovery_reset(struct mali_group *group);
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index)
+{
+ struct mali_group *group = NULL;
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_calloc(1, sizeof(struct mali_group));
+ if (NULL != group) {
+ group->timeout_timer = _mali_osk_timer_init();
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+ group->l2_cache_core[0] = core;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->executor_list);
+ _mali_osk_list_init(&group->pm_domain_list);
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
+
+ /* register this object as a part of the correct power domain */
+ if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+ group->pm_domain = mali_pm_register_group(domain_index, group);
+
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+ mali_group_core_description(group)));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core) {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ if (mali_global_groups[i] == group) {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups) {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+ _mali_osk_free(group);
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
+{
+ /* This group object now owns the MMU core object */
+ group->mmu = mmu_core;
+ group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+ if (NULL == group->bottom_half_work_mmu) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+ /* This group object no longer owns the MMU core object */
+ group->mmu = NULL;
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+ group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+ if (NULL == group->bottom_half_work_gp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the GP core object */
+ group->gp_core = NULL;
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+ group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+ if (NULL == group->bottom_half_work_pp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the PP core object */
+ group->pp_core = NULL;
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+ mali_group_core_description(group)));
+
+ if (MALI_GROUP_STATE_INACTIVE == group->state) {
+ /* Group is inactive, get PM refs in order to power up */
+
+ /*
+		 * We'll take a maximum of 2 power domain references per group,
+		 * one for the group itself, and one for its L2 cache.
+ */
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool all_groups_on;
+
+ /* Deal with child groups first */
+ if (mali_group_is_virtual(group)) {
+ /*
+ * The virtual group might have 0, 1 or 2 L2s in
+ * its l2_cache_core array, but we ignore these and
+ * let the child groups take the needed L2 cache ref
+ * on behalf of the virtual group.
+			 * In other words: the L2 refs are taken in pairs with
+			 * the physical groups which the L2 is attached to.
+ */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /*
+ * Child group is inactive, get PM
+ * refs in order to power up.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+ == child->state);
+
+ child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(
+ child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ groups[num_domains] = child;
+ num_domains++;
+
+ /*
+ * Take L2 domain ref for child group.
+ */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+ > num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL ==
+ child->l2_cache_core[1]);
+ num_domains++;
+ }
+ } else {
+ /* Take L2 domain ref for physical groups. */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+		/* Do the group itself last (its dependencies first) */
+
+ group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ groups[num_domains] = group;
+ num_domains++;
+
+ all_groups_on = mali_pm_get_domain_refs(domains, groups,
+ num_domains);
+
+ /*
+		 * Complete activation for the group, whether it
+		 * is a virtual or a physical group.
+ */
+ if (MALI_TRUE == all_groups_on) {
+
+ mali_group_set_active(group);
+ }
+ } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+ /* Already active */
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ } else {
+ /*
+ * Activation already pending, group->power_is_on could
+ * be both true or false. We need to wait for power up
+ * notification anyway.
+ */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+ == group->state);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+ mali_group_core_description(group),
+ MALI_GROUP_STATE_ACTIVE == group->state ?
+ "ACTIVE" : "PENDING"));
+
+ return group->state;
+}
+
+mali_bool mali_group_set_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+
+ MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+ mali_group_core_description(group)));
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ if (MALI_TRUE != child->power_is_on) {
+ return MALI_FALSE;
+ }
+
+ child->state = MALI_GROUP_STATE_ACTIVE;
+ }
+
+ mali_group_reset(group);
+ }
+
+ /* Go to ACTIVE state */
+ group->state = MALI_GROUP_STATE_ACTIVE;
+
+ return MALI_TRUE;
+}
+
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool power_down = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+ mali_group_core_description(group)));
+
+ group->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ num_domains++;
+
+ if (mali_group_is_virtual(group)) {
+ /* Release refs for all child groups */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ child->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ num_domains++;
+
+ /* Release L2 cache domain for child groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+ num_domains++;
+ }
+
+ /*
+ * Must do mali_group_power_down() steps right here for
+ * virtual group, because virtual group itself is likely to
+ * stay powered on, however child groups are now very likely
+ * to be powered off (and thus lose their state).
+ */
+
+ mali_group_clear_session(group);
+ /*
+		 * Disable the broadcast unit (clear its mask).
+ * This is needed in case the GPU isn't actually
+ * powered down at this point and groups are
+ * removed from an inactive virtual group.
+ * If not, then the broadcast unit will intercept
+ * their interrupts!
+ */
+ mali_bcast_disable(group->bcast_core);
+ } else {
+ /* Release L2 cache domain for physical groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+ power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+ return power_down;
+}
+
+void mali_group_power_up(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+ mali_group_core_description(group)));
+
+ group->power_is_on = MALI_TRUE;
+
+ if (MALI_FALSE == mali_group_is_virtual(group)
+ && MALI_FALSE == mali_group_is_in_virtual(group)) {
+ mali_group_reset(group);
+ }
+
+ /*
+	 * When we acquire only one physical group from the virtual group,
+	 * we should remove the bcast & dlbu mask from the virtual group and
+	 * reset the bcast and dlbu cores, even though some of the PP cores
+	 * in the virtual group may not be powered on.
+ */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ mali_bcast_reset(group->bcast_core);
+ mali_dlbu_update_mask(group->dlbu_core);
+ }
+}
+
+void mali_group_power_down(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+ mali_group_core_description(group)));
+
+ group->power_is_on = MALI_FALSE;
+
+ if (mali_group_is_virtual(group)) {
+ /*
+		 * What we do for physical groups in this function should
+		 * already have been done in mali_group_deactivate()
+		 * for the virtual group.
+ */
+ MALI_DEBUG_ASSERT(NULL == group->session);
+ } else {
+ mali_group_clear_session(group);
+ }
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+ u32 i;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+ mali_group_core_description(vgroup),
+ vgroup));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+ i = 0;
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+ MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+ i, mali_group_core_description(group),
+ group, group->l2_cache_core[0]));
+ i++;
+ }
+})
+
+static void mali_group_dump_core_status(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
+
+ if (NULL != group->gp_core) {
+ MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
+
+ for (i = 0; i < 0xA8; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
+ }
+
+
+ } else {
+ MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
+
+ for (i = 0; i < 0x5c; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+ }
+
+ /* Ignore some minor registers */
+ for (i = 0x1000; i < 0x1068; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+ }
+ }
+
+ MALI_PRINT(("Dump Group MMU\n"));
+ for (i = 0; i < 0x24; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
+ }
+}
+
+
+/**
+ * @brief Dump group status
+ */
+void mali_group_dump_status(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *group_c;
+ struct mali_group *temp;
+ _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
+ mali_group_dump_core_status(group_c);
+ }
+ } else {
+ mali_group_dump_core_status(group);
+ }
+}
+
+/**
+ * @brief Add child group to virtual group parent
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+ mali_bool found;
+ u32 i;
+
+ MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(NULL == child->parent_group);
+
+ _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+ child->parent_group = parent;
+
+ MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+ MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+ MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+ /* Keep track of the L2 cache cores of child groups */
+ found = MALI_FALSE;
+ for (i = 0; i < 2; i++) {
+ if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+ MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+ parent->l2_cache_core_ref_count[i]++;
+ found = MALI_TRUE;
+ }
+ }
+
+ if (!found) {
+ /* First time we see this L2 cache, add it to our list */
+ i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+ MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core[i] = child->l2_cache_core[0];
+ parent->l2_cache_core_ref_count[i]++;
+ }
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_add_group(parent->bcast_core, child);
+ mali_dlbu_add_group(parent->dlbu_core, child);
+
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ if (MALI_TRUE == child->power_is_on) {
+ if (NULL == parent->session) {
+ if (NULL != child->session) {
+ /*
+ * Parent has no session, so clear
+				 * the child's session as well.
+ */
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ }
+ } else {
+ if (parent->session == child->session) {
+			/* We already have the same session as the parent,
+			 * so a simple zap should be enough.
+ */
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+ /*
+ * Parent has a different session, so we must
+				 * switch to that session's page table
+ */
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+
+ /* It is the parent which keeps the session from now on */
+ child->session = NULL;
+ }
+ } else {
+ /* should have been cleared when child was powered down */
+ MALI_DEBUG_ASSERT(NULL == child->session);
+ }
+
+ /* Start job on child when parent is active */
+ if (NULL != parent->pp_running_job) {
+ struct mali_pp_job *job = parent->pp_running_job;
+
+ MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+ child, mali_pp_job_get_id(job), parent));
+
+ /* Only allowed to add active child to an active parent */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+ mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+ mali_pp_core_description(group->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(parent == child->parent_group);
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_remove_group(parent->bcast_core, child);
+ mali_dlbu_remove_group(parent->dlbu_core, child);
+
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ child->session = parent->session;
+ child->parent_group = NULL;
+
+ _mali_osk_list_delinit(&child->group_list);
+ if (_mali_osk_list_empty(&parent->group_list)) {
+ parent->session = NULL;
+ }
+
+ /* Keep track of the L2 cache cores of child groups */
+ i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core_ref_count[i]--;
+ if (parent->l2_cache_core_ref_count[i] == 0) {
+ parent->l2_cache_core[i] = NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+ struct mali_group *child = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+
+ if (!_mali_osk_list_empty(&parent->group_list)) {
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ mali_group_remove_group(parent, child);
+ }
+
+ if (NULL != child) {
+ if (MALI_GROUP_STATE_ACTIVE != parent->state
+ && MALI_TRUE == child->power_is_on) {
+ mali_group_reset(child);
+ }
+ }
+
+ return child;
+}
+
+void mali_group_reset(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+ MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+ mali_group_core_description(group)));
+
+ if (NULL != group->dlbu_core) {
+ mali_dlbu_reset(group->dlbu_core);
+ }
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_reset(group->bcast_core);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != group->mmu);
+ mali_group_reset_mmu(group);
+
+ if (NULL != group->gp_core) {
+ MALI_DEBUG_ASSERT(NULL == group->pp_core);
+ mali_gp_reset(group->gp_core);
+ } else {
+ MALI_DEBUG_ASSERT(NULL != group->pp_core);
+ mali_group_reset_pp(group);
+ }
+}
+
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+ job,
+ mali_group_core_description(group)));
+
+ session = mali_gp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+
+ /* Reset GPU and disable gpu secure mode if needed. */
+ if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_disable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ }
+
+ /* Reload mmu page table if needed */
+ if (MALI_TRUE == gpu_secure_mode_pre_enabled) {
+ mali_group_reset(group);
+ mali_group_activate_page_directory(group, session, MALI_TRUE);
+ } else {
+ mali_group_activate_page_directory(group, session, MALI_FALSE);
+ }
+
+ mali_gp_job_start(group->gp_core, job);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+ sched_clock(), mali_gp_job_get_tid(job),
+ 0, mali_gp_job_get_id(job));
+#endif
+
+ group->gp_running_job = job;
+ group->is_working = MALI_TRUE;
+
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+ job,
+ mali_group_core_description(group),
+ group->start_time));
+}
+
+/* Used to set all the registers except the frame renderer list address and the fragment shader stack address.
+ * This means the caller must set these two registers properly before calling this function.
+ */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group)));
+
+ session = mali_pp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+ }
+
+ if (NULL != group->l2_cache_core[1]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+ }
+
+ /* Reset GPU and change gpu secure mode if needed. */
+ if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_enable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_disable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ }
+
+ /* Reload the mmu page table if needed */
+ if ((MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == gpu_secure_mode_pre_enabled)
+ || (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == gpu_secure_mode_pre_enabled)) {
+ mali_group_reset(group);
+ mali_group_activate_page_directory(group, session, MALI_TRUE);
+ } else {
+ mali_group_activate_page_directory(group, session, MALI_FALSE);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 core_num = 0;
+
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ /* Configure DLBU for the job */
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job);
+ core_num++;
+ }
+
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+ } else {
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+ }
+
+	/* If the group is virtual, loop through the physical groups which belong to this group
+	 * and emit profiling events for their cores as virtual */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+ } else { /* group is physical - call profiling events for physical cores */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
+#endif
+
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->is_working = MALI_TRUE;
+
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group),
+ group->start_time));
+
+}
+
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
+
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+
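+ /* Hand the new heap range to the GP core so it can continue the suspended job. */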
+ mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+}
+
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (!mali_group_is_virtual(group)) {
+ /* This is a physical group -- reset its single MMU directly. */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ } else { /* virtual group */
+ /* Loop through all members of this virtual group and wait
+ * until they are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ err = mali_mmu_reset(child->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ }
+ }
+}
+
+static void mali_group_reset_pp(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ mali_pp_reset_async(group->pp_core);
+
+ if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+ /* This is a physical group or an idle virtual group -- simply wait for
+ * the reset to complete. */
+ mali_pp_reset_wait(group->pp_core);
+ } else {
+ /* Loop through all members of this virtual group and wait until they
+ * are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_reset_wait(child->pp_core);
+ }
+ }
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
+{
+ struct mali_pp_job *pp_job_to_return;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+ MALI_DEBUG_ASSERT_POINTER(sub_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
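+ /* pp_running_job is asserted non-NULL above; the check below is kept as a
+ * defensive guard before touching HW counters and profiling. */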
+ if (NULL != group->pp_running_job) {
+
+ /* Deal with HW counters and profiling */
+
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+ }
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ } else {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+ mali_gp_core_description(group->gp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+ }
+
+ if (success) {
+ /* Only do a soft reset for successful jobs; a full recovery
+ * reset is done for failed jobs. */
+ mali_pp_reset_async(group->pp_core);
+ }
+
+ pp_job_to_return = group->pp_running_job;
+ group->pp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+ *sub_job = group->pp_running_sub_job;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ return pp_job_to_return;
+}
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->gp_running_job) {
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+ mali_pp_core_description(group->pp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ }
+
+ if (success) {
+ /* Only do a soft reset for successful jobs; a full recovery
+ * reset is done for failed jobs. */
+ mali_gp_reset_async(group->gp_core);
+ }
+
+ gp_job_to_return = group->gp_running_job;
+ group->gp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ return gp_job_to_return;
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+ if (mali_global_num_groups > index) {
+ return mali_global_groups[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+ return mali_global_num_groups;
+}
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+ mali_session_get_page_directory(session), session,
+ mali_group_core_description(group)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (group->session != session || MALI_TRUE == is_reload) {
+ /* Different session than last time (or a forced reload), so the page directory must be switched */
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+ session, group->session,
+ mali_group_core_description(group)));
+ mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+ group->session = session;
+ } else {
+ /* Same session as last time, so only the TLB needs to be zapped */
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+ session->page_directory,
+ mali_group_core_description(group)));
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+}
+
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Stop the cores by requesting a bus stop */
+ if (NULL != group->pp_core) {
+ mali_pp_stop_bus(group->pp_core);
+ } else {
+ mali_gp_stop_bus(group->gp_core);
+ }
+
+ /* Flush MMU and clear page fault (if any) */
+ mali_mmu_activate_fault_flush_page_directory(group->mmu);
+ mali_mmu_page_fault_done(group->mmu);
+
+ /* Wait for cores to stop bus, then do a hard reset on them */
+ if (NULL != group->pp_core) {
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child, *temp;
+
+ /* Disable the broadcast unit while we reset the member cores directly. */
+ mali_bcast_disable(group->bcast_core);
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_stop_bus_wait(child->pp_core);
+ mali_pp_hard_reset(child->pp_core);
+ }
+
+ mali_bcast_enable(group->bcast_core);
+ } else {
+ mali_pp_stop_bus_wait(group->pp_core);
+ mali_pp_hard_reset(group->pp_core);
+ }
+ } else {
+ mali_gp_stop_bus_wait(group->gp_core);
+ mali_gp_hard_reset(group->gp_core);
+ }
+
+ /* Reset MMU */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ MALI_IGNORE(err);
+
+ group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+ int n = 0;
+ int i;
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ if (mali_group_is_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP Group: %p\n", group);
+ } else if (mali_group_is_in_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Child PP Group: %p\n", group);
+ } else if (NULL != group->pp_core) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP Group: %p\n", group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP Group: %p\n", group);
+ }
+
+ switch (group->state) {
+ case MALI_GROUP_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: INACTIVE\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVATION_PENDING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: ACTIVATION_PENDING\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: MALI_GROUP_STATE_ACTIVE\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: UNKNOWN (%d)\n", group->state);
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tSW power: %s\n",
+ group->power_is_on ? "On" : "Off");
+
+ n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
+
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[i]);
+ n += mali_pm_dump_state_domain(domain,
+ buf + n, size - n);
+ }
+ }
+
+ if (group->gp_core) {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tGP running job: %p\n", group->gp_running_job);
+ }
+
+ if (group->pp_core) {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPP running job: %p, subjob %d \n",
+ group->pp_running_job,
+ group->pp_running_sub_job);
+ }
+
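+ /* Recurse into the child groups of a virtual group; the list is empty for physical groups. */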
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ n += mali_group_dump_state(child, buf + n, size - n);
+ }
+
+ return n;
+}
+#endif
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
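+ /* With shared interrupts this upper half can be called for a group that is not
+ * working, so the profiling code below takes the executor lock and returns early
+ * in that case. */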
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* The group has completed and no job is scheduled on it; it has already been powered off */
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ 0xFFFFFFFF, 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ 0xFFFFFFFF, 0);
+ }
+
+ mali_executor_unlock();
+ return ret;
+ }
+#endif
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ return ret;
+}
+
+static void mali_group_bottom_half_mmu(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+
+ mali_executor_interrupt_mmu(group, MALI_FALSE);
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+}
+
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_gp_get_rawstat(group->gp_core),
+ mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ ret = mali_executor_interrupt_gp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* The group has completed and no job is scheduled on it; it has already been powered off */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ 0xFFFFFFFF, 0);
+ mali_executor_unlock();
+ return ret;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ return ret;
+}
+
+static void mali_group_bottom_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+
+ mali_executor_interrupt_gp(group, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+}
+
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_pp_get_rawstat(group->pp_core),
+ mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ ret = mali_executor_interrupt_pp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* The group has completed and no job is scheduled on it; it has already been powered off */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ 0xFFFFFFFF, 0);
+ mali_executor_unlock();
+ return ret;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ return ret;
+}
+
+static void mali_group_bottom_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+
+ mali_executor_interrupt_pp(group, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+}
+
+static void mali_group_timeout(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+ mali_group_core_description(group),
+ _mali_osk_time_tickcount()));
+
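+ /* The actual timeout handling is done in the bottom half; only the work scheduling happens in timer context. */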
+ if (NULL != group->gp_core) {
+ mali_group_schedule_bottom_half_gp(group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ mali_group_schedule_bottom_half_pp(group);
+ }
+}
+
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (group->session != session) {
+ /* not running from this session */
+ return MALI_TRUE; /* success */
+ }
+
+ if (group->is_working) {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+ return zap_success;
+ } else {
+ /* Just remove the session instead of zapping */
+ mali_group_clear_session(group);
+ return MALI_TRUE; /* success */
+ }
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+ u32 source0 = 0;
+ u32 value0 = 0;
+ u32 source1 = 0;
+ u32 value1 = 0;
+ u32 profiling_channel = 0;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ switch (core_num) {
+ case 0:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ case 1:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+ break;
+ case 2:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+ break;
+ default:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ }
+
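+ /* For L2 caches 1 and 2 the cache id does not have to match the index into l2_cache_core[],
+ * so the core is selected by its id. */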
+ if (0 == core_num) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ if (1 == core_num) {
+ if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+ if (2 == core_num) {
+ if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+
+ _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
diff --git a/drivers/gpu/arm/utgard/common/mali_group.h b/drivers/gpu/arm/utgard/common/mali_group.h
new file mode 100644
index 000000000000..32481e4a6748
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_group.h
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+#include "mali_osk_profiling.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
+
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
+
+enum mali_group_state {
+ MALI_GROUP_STATE_INACTIVE,
+ MALI_GROUP_STATE_ACTIVATION_PENDING,
+ MALI_GROUP_STATE_ACTIVE,
+};
+
+/**
+ * This structure represents a render group.
+ * A render group is defined as all the cores that share the same Mali MMU.
+ */
+
+struct mali_group {
+ struct mali_mmu_core *mmu;
+ struct mali_session_data *session;
+
+ enum mali_group_state state;
+ mali_bool power_is_on;
+
+ mali_bool is_working;
+ unsigned long start_time; /* in ticks */
+
+ struct mali_gp_core *gp_core;
+ struct mali_gp_job *gp_running_job;
+
+ struct mali_pp_core *pp_core;
+ struct mali_pp_job *pp_running_job;
+ u32 pp_running_sub_job;
+
+ struct mali_pm_domain *pm_domain;
+
+ struct mali_l2_cache_core *l2_cache_core[2];
+ u32 l2_cache_core_ref_count[2];
+
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+
+ /* Used for working groups which need to be disabled */
+ mali_bool disable_requested;
+
+ /* Used by a virtual group to link its child groups */
+ _mali_osk_list_t group_list;
+
+ /* Used by the executor module to link groups in the same state */
+ _mali_osk_list_t executor_list;
+
+ /* Used by PM domains to link groups in the same domain */
+ _mali_osk_list_t pm_domain_list;
+
+ _mali_osk_wq_work_t *bottom_half_work_mmu;
+ _mali_osk_wq_work_t *bottom_half_work_gp;
+ _mali_osk_wq_work_t *bottom_half_work_pp;
+
+ _mali_osk_timer_t *timeout_timer;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index);
+
+void mali_group_dump_status(struct mali_group *group);
+
+void mali_group_delete(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+ struct mali_mmu_core *mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+ struct mali_gp_core *gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+ struct mali_pp_core *pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+MALI_STATIC_INLINE const char *mali_group_core_description(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ if (NULL != group->pp_core) {
+ return mali_pp_core_description(group->pp_core);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ return mali_gp_core_description(group->gp_core);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != group->dlbu_core);
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Check if a group is a part of a virtual group or not
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group,
+ * including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
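+ /* If a session is attached, point the MMU at the empty page directory so no stale mappings remain. */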
+ if (NULL != group->session) {
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children need to be ACTIVE first
+ */
+mali_bool mali_group_set_active(struct mali_group *group);
+
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and the caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
+ */
+mali_bool mali_group_deactivate(struct mali_group *group);
+
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+ struct mali_group *group, mali_bool disable)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ group->disable_requested = disable;
+
+ /**
+ * When any child group's disable_requested is set to TRUE, the
+ * disable_requested of the parent group must also be set to TRUE.
+ * The disable_requested of the parent group may only be set back to FALSE
+ * once the disable_requested of all its child groups is FALSE.
+ */
+ if (NULL != group->parent_group && MALI_TRUE == disable) {
+ group->parent_group->disable_requested = disable;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->disable_requested;
+}
+
+/** @brief Virtual group management: add, acquire and remove child groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
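+ /* A group that is part of a virtual group reports the working state of the virtual group it belongs to. */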
+ if (mali_group_is_in_virtual(group)) {
+ struct mali_group *tmp_group = mali_executor_get_virtual_group();
+ return tmp_group->is_working;
+ }
+ return group->is_working;
+}
+
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->gp_running_job;
+}
+
+/** @brief Zap the MMU TLB of a group
+ *
+ * The TLB is only zapped if \a session is the session currently active on the group.
+ */
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->pp_core;
+}
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled);
+
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled);
+
+/** @brief Start a virtual PP job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a particular subjob of a PP job on a specific physical PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief Remove all unused groups from the tmp_unused group list, so that the virtual group is left in a consistent state.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume a GP job that was suspended while waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_get_interrupt_result(group->gp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_get_interrupt_result(group->pp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_mmu_get_interrupt_result(group->mmu);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_is_active(group->gp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_is_active(group->pp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+ unsigned long time_cost;
+ struct mali_group *tmp_group = group;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* If the group is part of a virtual group, the virtual group's start time must be used */
+ if (mali_group_is_in_virtual(group)) {
+ tmp_group = mali_executor_get_virtual_group();
+ }
+
+ time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+ if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+ /*
+ * current tick is at or after timeout end time,
+ * so this is a valid timeout
+ */
+ return MALI_TRUE;
+ } else {
+ /*
+ * Not a valid timeout. A HW interrupt probably beat
+ * us to it, and the timer wasn't properly deleted
+ * (async deletion used due to atomic context).
+ */
+ return MALI_FALSE;
+ }
+}
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_mask_all_interrupts(group->gp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_mask_all_interrupts(group->pp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+ struct mali_group *group,
+ enum mali_interrupt_result exceptions)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
+
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
+
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+}
+#endif
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return _mali_osk_list_empty(&group->group_list);
+}
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.c b/drivers/gpu/arm/utgard/common/mali_hw_core.c
new file mode 100644
index 000000000000..a813816e998d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_hw_core.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+ core->phys_addr = resource->base;
+ core->phys_offset = resource->base - _mali_osk_resource_base_address();
+ core->description = resource->description;
+ core->size = reg_size;
+
+ MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
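+ /* Request the register region and map it; on failure the region is released again
+ * and _MALI_OSK_ERR_FAULT is returned. */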
+ if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+ core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+ if (NULL != core->mapped_registers) {
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+ } else {
+ MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+ if (NULL != core->mapped_registers) {
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.h b/drivers/gpu/arm/utgard/common/mali_hw_core.h
new file mode 100644
index 000000000000..38d96e240a20
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_hw_core.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+ uintptr_t phys_addr; /**< Physical address of the registers */
+ u32 phys_offset; /**< Offset from start of Mali to registers */
+ u32 size; /**< Size of registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char *description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+/*
+ * GP and PP core translate their int_stat/rawstat into one of these
+ */
+enum mali_interrupt_result {
+ MALI_INTERRUPT_RESULT_NONE,
+ MALI_INTERRUPT_RESULT_SUCCESS,
+ MALI_INTERRUPT_RESULT_SUCCESS_VS,
+ MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+ MALI_INTERRUPT_RESULT_OOM,
+ MALI_INTERRUPT_RESULT_ERROR
+};
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+ u32 read_val;
+ read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+ MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, read_val));
+ return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register is only written if the new value differs from old_val.
+ * When a write occurs, the caller is expected to update its stored copy of the old value. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ if (old_val != new_val) {
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+ }
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+}
+
+/* Conditionally write a set of registers.
+ * Each register is only written if its new value differs from the corresponding entry in old_array.
+ * When a write occurs, the caller is expected to update its stored copy of the old values. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ if (old_array[i] != write_array[i]) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+ }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_common.h b/drivers/gpu/arm/utgard/common/mali_kernel_common.h
new file mode 100644
index 000000000000..6a8f0f0116a4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_common.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: it will also be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X)) Prints the second argument if nr <= mali_debug_level.
+ * - MALI_DEBUG_PRINT_ERROR((X)) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(condition, (X)) If the asserted condition is false, the message is printed and a breakpoint is triggered.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE(X) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * The suggested range for the debug level is [1:6], where
+ * [1:2] are messages with the highest priority, indicating possible errors,
+ * [3:4] are messages with medium priority, outputting important variables,
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
+
+/**
+* Fundamental error macro. Reports an error code. This is abstracted to allow us to
+* easily switch to a different error reporting method if we want, and also to allow
+* us to search for error returns easily.
+*
+* Note no closing semicolon - this is supplied in typical usage:
+*
+* MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than
+ * _MALI_OSK_NO_ERROR, then the value is returned from the enclosing function
+ * as an error code. This effectively acts as a guard clause, and propagates
+ * error values up the call stack. This uses a temporary value to ensure that
+ * the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error,
+ * this error will be returned without evaluating the expression in
+ * MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
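+
+/* Illustrative usage (sketch; mali_sub_init() is a hypothetical helper):
+ * propagate a failure from a sub-initialisation call to our own caller.
+ *
+ *   _mali_osk_errcode_t init_everything(void)
+ *   {
+ *           MALI_CHECK_NO_ERROR(mali_sub_init());
+ *           MALI_SUCCESS;
+ *   }
+ */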
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level, condition, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this, so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_core.c b/drivers/gpu/arm/utgard/common/mali_kernel_core.c
new file mode 100644
index 000000000000..81152f789980
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_core.c
@@ -0,0 +1,1339 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include <linux/fence.h>
+#endif
+
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
+
+/* Mali GPU memory. Real values come from module parameters or from device-specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+
+/* Default shared memory size is set to 4G. */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static uintptr_t global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+mali_bool mali_gpu_class_is_mali470 = MALI_FALSE;
+
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ global_gpu_base_address = _mali_osk_resource_base_address();
+ if (0 == global_gpu_base_address) {
+ err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return err;
+}
+
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
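+ /*
+ * Descriptive note: the broadcast unit addresses physical PP cores with a
+ * one-hot bitmask, so PP0..PP7 (identified here by their register offset
+ * from the GPU base address, including the aliased high mappings) map to
+ * bits 0..7.
+ */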
+ switch (resource_pp->base - global_gpu_base_address) {
+ case 0x08000:
+ case 0x20000: /* fall-through for aliased mapping */
+ return 0x01;
+ case 0x0A000:
+ case 0x22000: /* fall-through for aliased mapping */
+ return 0x02;
+ case 0x0C000:
+ case 0x24000: /* fall-through for aliased mapping */
+ return 0x04;
+ case 0x0E000:
+ case 0x26000: /* fall-through for aliased mapping */
+ return 0x08;
+ case 0x28000:
+ return 0x10;
+ case 0x2A000:
+ return 0x20;
+ case 0x2C000:
+ return 0x40;
+ case 0x2E000:
+ return 0x80;
+ default:
+ return 0;
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ _mali_osk_resource_t first_pp_resource;
+
+ /* Find the first PP core resource (again) */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+ if (NULL != pp_core) {
+ u32 pp_version;
+
+ pp_version = mali_pp_core_get_version(pp_core);
+
+ mali_group_delete(group);
+
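+ /*
+ * Descriptive note: the PP version register packs the product ID in the
+ * top 16 bits and the major/minor revision in bits 15:8 and 7:0, as
+ * decoded below.
+ */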
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16) {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+ _mali_osk_abort();
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI470_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI470;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_delete_groups(void)
+{
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(0);
+ while (NULL != group) {
+ mali_group_delete(group);
+ group = mali_group_get_glob_group(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+ struct mali_l2_cache_core *l2;
+
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ while (NULL != l2) {
+ mali_l2_cache_delete(l2);
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (NULL != resource) {
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+ l2_cache = mali_l2_cache_create(resource, domain_index);
+ if (NULL == l2_cache) {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return NULL;
+ }
+ }
+ MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+
+ return l2_cache;
+}
+
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (mali_is_mali400()) {
+ _mali_osk_resource_t l2_resource;
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else if (mali_is_mali450()) {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t l2_gp_resource;
+ _mali_osk_resource_t l2_pp_grp0_resource;
+ _mali_osk_resource_t l2_pp_grp1_resource;
+
+ /* Make cluster for GP's L2 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Find corresponding l2 domain */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ } else if (mali_is_mali470()) {
+ _mali_osk_resource_t l2c1_resource;
+
+ /* Make cluster for L2C1 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp,
+ u32 domain_index)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the group object */
+ group = mali_group_create(cache, NULL, NULL, domain_index);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ return NULL;
+ }
+
+ /* Create the MMU object inside group */
+ mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+ if (NULL == mmu) {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+
+ if (NULL != resource_gp) {
+ /* Create the GP core object inside this group */
+ struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+ if (NULL == gp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ if (NULL != resource_pp) {
+ struct mali_pp_core *pp_core;
+
+ /* Create the PP core object inside this group */
+ pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+ if (NULL == pp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ return group;
+}
+
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+ _mali_osk_resource_t *resource_pp_bcast,
+ _mali_osk_resource_t *resource_dlbu,
+ _mali_osk_resource_t *resource_bcast)
+{
+ struct mali_mmu_core *mmu_pp_bcast_core;
+ struct mali_pp_core *pp_bcast_core;
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+ /* Create the DLBU core object */
+ dlbu_core = mali_dlbu_create(resource_dlbu);
+ if (NULL == dlbu_core) {
+ MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the Broadcast unit core */
+ bcast_core = mali_bcast_unit_create(resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+#if defined(DEBUG)
+ /* Get a physical PP group to temporarily add to broadcast unit. IRQ
+ * verification needs a physical group in the broadcast unit to test
+ * the broadcast unit interrupt line. */
+ {
+ struct mali_group *phys_group = NULL;
+ int i;
+ for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+ phys_group = mali_group_get_glob_group(i);
+ if (NULL != mali_group_get_pp_core(phys_group)) break;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+ /* Add the group temporarily to the broadcast, and update the
+ * broadcast HW. Since the HW is not updated when removing the
+ * group the IRQ check will work when the virtual PP is created
+ * later.
+ *
+ * When the virtual group gets populated, the actually used
+ * groups will be added to the broadcast unit and the HW will
+ * be updated.
+ */
+ mali_bcast_add_group(bcast_core, phys_group);
+ mali_bcast_reset(bcast_core);
+ mali_bcast_remove_group(bcast_core, phys_group);
+ }
+#endif /* DEBUG */
+ group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+ mali_bcast_unit_delete(bcast_core);
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the MMU object inside group */
+ mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+ if (NULL == mmu_pp_bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the PP core object inside this group */
+ pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+ if (NULL == pp_bcast_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ struct mali_group *group;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+
+ _mali_osk_resource_t resource_gp;
+ _mali_osk_resource_t resource_gp_mmu;
+ _mali_osk_resource_t resource_pp[8];
+ _mali_osk_resource_t resource_pp_mmu[8];
+ _mali_osk_resource_t resource_pp_mmu_bcast;
+ _mali_osk_resource_t resource_pp_bcast;
+ _mali_osk_resource_t resource_dlbu;
+ _mali_osk_resource_t resource_bcast;
+ _mali_osk_errcode_t resource_gp_found;
+ _mali_osk_errcode_t resource_gp_mmu_found;
+ _mali_osk_errcode_t resource_pp_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+ _mali_osk_errcode_t resource_pp_bcast_found;
+ _mali_osk_errcode_t resource_dlbu_found;
+ _mali_osk_errcode_t resource_bcast_found;
+
+ if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) {
+ /* No known HW core */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+ /* Group settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.max_job_runtime) {
+ mali_max_job_runtime = data.max_job_runtime;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+ /* Mali-450 has separate L2s for the GP and the PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
+
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
+
+ if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_dlbu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+ /* Missing mandatory core(s) for Mali-450 or Mali-470 */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != resource_gp_found ||
+ _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_1++;
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+ if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_1++;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+ if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 has a second core group */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_2++;
+ }
+ }
+ }
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+
+ mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+ mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+ MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+ MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+ _mali_osk_resource_t resource_pmu;
+
+ MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
+ struct mali_pmu_core *pmu;
+
+ pmu = mali_pmu_create(&resource_pmu);
+ if (NULL == pmu) {
+ MALI_PRINT_ERROR(("Failed to create PMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ /* It's ok if the PMU doesn't exist */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+ _mali_osk_device_data data = { 0, };
+ _mali_osk_errcode_t ret;
+
+ /* The priority of setting the value of mali_shared_mem_size,
+ * mali_dedicated_mem_start and mali_dedicated_mem_size:
+ * 1. module parameter;
+ * 2. platform data;
+ * 3. default value;
+ **/
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Memory settings are not overridden by module parameters, so use device settings */
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
+ /* Use device specific settings (if defined) */
+ mali_dedicated_mem_start = data.dedicated_mem_start;
+ mali_dedicated_mem_size = data.dedicated_mem_size;
+ }
+
+ if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+ 0 != data.shared_mem_size) {
+ mali_shared_mem_size = data.shared_mem_size;
+ }
+ }
+
+ if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start));
+
+ /* Dedicated memory */
+ ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 < mali_shared_mem_size) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
+ /* Shared OS memory */
+ ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 == mali_fb_start && 0 == mali_fb_size) {
+ /* Frame buffer settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ mali_fb_start = data.fb_start;
+ mali_fb_size = data.fb_size;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ }
+
+ if (0 != mali_fb_size) {
+ /* Register frame buffer */
+ ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_detect_gpu_class(void)
+{
+ if (_mali_osk_identify_gpu_resource() == 0x450)
+ mali_gpu_class_is_mali450 = MALI_TRUE;
+
+ if (_mali_osk_identify_gpu_resource() == 0x470)
+ mali_gpu_class_is_mali470 = MALI_TRUE;
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ _mali_osk_resource_t resource_bcast;
+
+ /* Ensure broadcast unit is in a good state before we start creating
+ * groups and cores.
+ */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
+ struct mali_bcast_unit *bcast_core;
+
+ bcast_core = mali_bcast_unit_create(&resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_bcast_unit_delete(bcast_core);
+ }
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+ _mali_osk_errcode_t err;
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+ err = _mali_osk_resource_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
+
+ mali_pp_job_initialize();
+
+ mali_timeline_initialize();
+
+ err = mali_session_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Try to init GPU secure mode */
+ _mali_osk_gpu_secure_mode_init();
+
+#if defined(CONFIG_MALI400_PROFILING)
+ err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (_MALI_OSK_ERR_OK != err) {
+ /* No biggie if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ err = mali_memory_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_executor_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Configure memory early, needed by mali_mmu_initialize. */
+ err = mali_parse_config_memory();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_set_global_gpu_base_address();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Detect GPU class (Mali-450 or Mali-470, otherwise treated as Mali-400) */
+ mali_detect_gpu_class();
+
+ err = mali_check_shared_interrupts();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the MALI PMU (will not touch HW!) */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the power management module */
+ err = mali_pm_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Make sure the entire GPU stays on for the rest of this function */
+ mali_pm_init_begin();
+
+ /* Ensure HW is in a good state before starting to access cores. */
+ err = mali_init_hw_reset();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Detect which Mali GPU we are dealing with */
+ err = mali_parse_product_info();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* The global_product_id is now populated with the correct Mali GPU */
+
+ /* Start configuring the actual Mali hardware. */
+
+ err = mali_mmu_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ err = mali_dlbu_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+ }
+
+ err = mali_parse_config_l2_cache();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_parse_config_groups();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Move groups into executor */
+ mali_executor_populate();
+
+ /* Must be called after all groups have been assigned a domain */
+ mali_pm_power_cost_setup();
+
+ /* Initialize the GPU timer */
+ err = mali_control_timer_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the GPU utilization tracking */
+ err = mali_utilization_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+#if defined(CONFIG_MALI_DVFS)
+ err = mali_dvfs_policy_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
+
+ /* Allowing the system to be turned off */
+ mali_pm_init_end();
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+void mali_terminate_subsystems(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+ mali_utilization_term();
+ mali_control_timer_term();
+
+ mali_executor_depopulate();
+ mali_delete_groups(); /* Delete groups not added to executor */
+ mali_executor_terminate();
+
+ mali_scheduler_terminate();
+ mali_pp_job_terminate();
+ mali_delete_l2_cache_cores();
+ mali_mmu_terminate();
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ mali_dlbu_terminate();
+ }
+
+ mali_pm_terminate();
+
+ if (NULL != pmu) {
+ mali_pmu_delete(pmu);
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_term();
+#endif
+
+ _mali_osk_gpu_secure_mode_deinit();
+
+ mali_memory_terminate();
+
+ mali_session_terminate();
+
+ mali_timeline_terminate();
+
+ global_gpu_base_address = 0;
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+ return global_product_id;
+}
+
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+ return global_gpu_major_version;
+}
+
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+ return global_gpu_minor_version;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ /* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ /* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete(notification);
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ return _MALI_OSK_ERR_OK;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if (NULL == notification) {
+ MALI_PRINT_ERROR(("Failed to create notification object\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args)
+{
+ wait_queue_head_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ queue = mali_session_get_wait_queue();
+
+ /* check pending big job number, might sleep if larger than MAX allowed number */
+ if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (!session->use_high_priority_job_queue) {
+ session->use_high_priority_job_queue = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ u32 i;
+ struct mali_session_data *session;
+
+ /* allocate a struct to track this session */
+ session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* create a response queue for this session */
+ session->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session->ioctl_queue) {
+ goto err;
+ }
+
+ /* Create a wait queue for this session */
+ session->wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == session->wait_queue) {
+ goto err_wait_queue;
+ }
+
+ session->page_directory = mali_mmu_pagedir_alloc();
+ if (NULL == session->page_directory) {
+ goto err_mmu;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+ MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+ goto err_mmu;
+ }
+
+ if (0 != mali_dlbu_phys_addr) {
+ mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+ goto err_session;
+ }
+
+ /* Create soft system. */
+ session->soft_job_system = mali_soft_job_system_create(session);
+ if (NULL == session->soft_job_system) {
+ goto err_soft;
+ }
+
+ /* Initialize the dma fence context. */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ session->fence_context = fence_context_alloc(1);
+ _mali_osk_atomic_init(&session->fence_seqno, 0);
+#else
+ MALI_PRINT_ERROR(("The kernel version not support dma fence!\n"));
+ goto err_time_line;
+#endif
+#endif
+
+ /* Create timeline system. */
+ session->timeline_system = mali_timeline_system_create(session);
+ if (NULL == session->timeline_system) {
+ goto err_time_line;
+ }
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
+#endif
+
+ _mali_osk_atomic_init(&session->number_of_pp_jobs, 0);
+
+ session->use_high_priority_job_queue = MALI_FALSE;
+
+ /* Initialize list of PP jobs on this session. */
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+ /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+ for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+ }
+
+ session->pid = _mali_osk_get_pid();
+ session->comm = _mali_osk_get_comm();
+ session->max_mali_mem_allocated_size = 0;
+ for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) {
+ atomic_set(&session->mali_mem_array[i], 0);
+ }
+ atomic_set(&session->mali_mem_allocated_pages, 0);
+ *context = (void *)session;
+
+ /* Add session to the list of all sessions. */
+ mali_session_add(session);
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ return _MALI_OSK_ERR_OK;
+
+err_time_line:
+ mali_soft_job_system_destroy(session->soft_job_system);
+err_soft:
+ mali_memory_session_end(session);
+err_session:
+ mali_mmu_pagedir_free(session->page_directory);
+err_mmu:
+ _mali_osk_wait_queue_term(session->wait_queue);
+err_wait_queue:
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+err:
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+}
+
+#if defined(DEBUG)
+/* parameter used for debug */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ struct mali_session_data *session;
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+ MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+ /* Remove session from list of all sessions. */
+ mali_session_remove(session);
+
+ /* This flag is used to prevent queueing of jobs due to activation. */
+ session->is_aborting = MALI_TRUE;
+
+ /* Stop the soft job timer. */
+ mali_timeline_system_stop_timer(session->timeline_system);
+
+ /* Abort queued jobs */
+ mali_scheduler_abort_session(session);
+
+ /* Abort executing jobs */
+ mali_executor_abort_session(session);
+
+ /* Abort the soft job system. */
+ mali_soft_job_system_abort(session->soft_job_system);
+
+ /* Force execution of all pending bottom half processing for GP and PP. */
+ _mali_osk_wq_flush();
+
+ /* The session PP list should now be empty. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+ /* At this point the GP and PP schedulers no longer have any jobs queued or running from this
+ * session, and all soft jobs in the soft job system have been destroyed. */
+
+ /* Any trackers left in the timeline system are directly or indirectly waiting on external
+ * sync fences. Cancel all sync fence waiters to trigger activation of all remaining
+ * trackers. This call will sleep until all timelines are empty. */
+ mali_timeline_system_abort(session->timeline_system);
+
+ /* Flush pending work.
+ * Needed to make sure all bottom half processing related to this
+ * session has been completed, before we free internal data structures.
+ */
+ _mali_osk_wq_flush();
+
+ /* Destroy timeline system. */
+ mali_timeline_system_destroy(session->timeline_system);
+ session->timeline_system = NULL;
+
+ /* Destroy soft system. */
+ mali_soft_job_system_destroy(session->soft_job_system);
+ session->soft_job_system = NULL;
+
+ /* Wait for the session job lists to become empty. */
+ _mali_osk_wait_queue_wait_event(session->wait_queue, mali_session_pp_job_is_empty, (void *) session);
+
+ /* Free remaining memory allocated to this session */
+ mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_stop_sampling(session->pid);
+#endif
+
+ /* Free session data structures */
+ mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_wait_queue_term(session->wait_queue);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(3, ("Session has ended\n"));
+
+#if defined(DEBUG)
+ MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+ MALI_DEBUG_PRINT(3, (" # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+ num_pm_runtime_resume = 0;
+ num_pm_updates = 0;
+ num_pm_updates_up = 0;
+ num_pm_updates_down = 0;
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+ int n = 0; /* Number of bytes written to buf */
+
+ n += mali_scheduler_dump_state(buf + n, size - n);
+ n += mali_executor_dump_state(buf + n, size - n);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_core.h b/drivers/gpu/arm/utgard/common/mali_kernel_core.h
new file mode 100644
index 000000000000..c471fc955107
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_core.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+typedef enum {
+ _MALI_PRODUCT_ID_UNKNOWN,
+ _MALI_PRODUCT_ID_MALI200,
+ _MALI_PRODUCT_ID_MALI300,
+ _MALI_PRODUCT_ID_MALI400,
+ _MALI_PRODUCT_ID_MALI450,
+ _MALI_PRODUCT_ID_MALI470,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+extern mali_bool mali_gpu_class_is_mali470;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+MALI_STATIC_INLINE mali_bool mali_is_mali470(void)
+{
+ return mali_gpu_class_is_mali470;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+ return mali_gpu_class_is_mali450;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+ if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470)
+ return MALI_FALSE;
+
+ return MALI_TRUE;
+}
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c
new file mode 100644
index 000000000000..d1b8dc3b0b0e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
+
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
+
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u32 last_utilization_gpu = 0;
+static u32 last_utilization_gp = 0;
+static u32 last_utilization_pp = 0;
+
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+
+/* Define the first timer control timer timeout in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
+
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer)
+{
+ u64 time_now;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized_gpu;
+ u32 work_normalized_gp;
+ u32 work_normalized_pp;
+ u32 period_normalized;
+ u32 utilization_gpu;
+ u32 utilization_gp;
+ u32 utilization_pp;
+
+ mali_utilization_data_lock();
+
+ time_now = _mali_osk_time_get_ns();
+
+ *time_period = time_now - *start_time;
+
+ if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+ mali_control_timer_pause();
+ /*
+ * No work done for this period
+ * - No need to reschedule timer
+ * - Report zero usage
+ */
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_pp = last_utilization_pp;
+
+ mali_utilization_data_unlock();
+
+ *need_add_timer = MALI_FALSE;
+
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
+ }
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time_gpu != 0) {
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = time_now;
+
+ /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+ if (work_start_time_gp != 0) {
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = time_now;
+ }
+
+ if (work_start_time_pp != 0) {
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = time_now;
+ }
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid a dependency on a 64-bit divider, we first shift both values
+ * down equally. We then shift the dividend up or the divisor down so that
+ * the result is expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+ work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+ work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+ period_normalized = (u32)(*time_period >> shift_val);
+
+ /*
+ * Now we should report the usage in parts of 256.
+ * This means we must shift the dividend up or the divisor down by 8
+ * (we could do a combination, but we just use one of them for simplicity;
+ * the end result is good enough anyway).
+ */
+ if (period_normalized > 0x00FFFFFF) {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ } else {
+ /*
+ * The divisor is so small that we can shift up the dividend without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized_gpu <<= 8;
+ work_normalized_gp <<= 8;
+ work_normalized_pp <<= 8;
+ }
+
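+ /*
+ * Worked example (illustrative): with a 100 ms period in which the GPU was
+ * busy for 50 ms, the division below yields (50 * 256) / 100 = 128, i.e.
+ * 50% utilization expressed in parts of 256.
+ */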
+ utilization_gpu = work_normalized_gpu / period_normalized;
+ utilization_gp = work_normalized_gp / period_normalized;
+ utilization_pp = work_normalized_pp / period_normalized;
+
+ last_utilization_gpu = utilization_gpu;
+ last_utilization_gp = utilization_gp;
+ last_utilization_pp = utilization_pp;
+
+ if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+ (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+ mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+ } else {
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+ }
+
+ /* starting a new period */
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ *start_time = time_now;
+
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_pp = last_utilization_pp;
+
+ mali_utilization_data_unlock();
+
+ *need_add_timer = MALI_TRUE;
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
+}
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if (NULL != data.utilization_callback) {
+ mali_utilization_callback = data.utilization_callback;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
+ }
+ }
+#endif /* USING_GPU_UTILIZATION */
+
+ if (NULL == mali_utilization_callback) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
+ }
+
+ utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+ if (NULL == utilization_data_lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ num_running_gp_cores = 0;
+ num_running_pp_cores = 0;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_data_lock) {
+ _mali_osk_spinlock_irq_term(utilization_data_lock);
+ }
+}
+
+void mali_utilization_gp_start(void)
+{
+ mali_utilization_data_lock();
+
+ ++num_running_gp_cores;
+ if (1 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First GP core started, consider GP busy from now and onwards */
+ work_start_time_gp = time_now;
+
+ if (0 == num_running_pp_cores) {
+ mali_bool is_resume = MALI_FALSE;
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+ /* Do some policy in new period for performance consideration */
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause is called when setting the clock, so every
+ * clock change would select the highest step even when
+ * clocking down, which is unnecessary. Only request a new
+ * DVFS period when the last utilization was 0, i.e. when the
+ * timer was stopped and the GPU is now being started again.
+ */
+ mali_dvfs_policy_new_period();
+ }
+#endif
+ /*
+ * Use a short interval for the first timeout to save power:
+ * the new period starts at full power, so if the job load is
+ * light and finishes within 10ms, keeping the high frequency
+ * for the rest of a long period would only waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+
+ } else {
+ /* Nothing to do */
+ mali_utilization_data_unlock();
+ }
+}
+
+void mali_utilization_pp_start(void)
+{
+ mali_utilization_data_lock();
+
+ ++num_running_pp_cores;
+ if (1 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First PP core started, consider PP busy from now and onwards */
+ work_start_time_pp = time_now;
+
+ if (0 == num_running_gp_cores) {
+ mali_bool is_resume = MALI_FALSE;
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+
+ /* Start a new period if stopped */
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause is called when setting the clock, so every
+ * clock change would select the highest step even when
+ * clocking down, which is unnecessary. Only request a new
+ * DVFS period when the last utilization was 0, i.e. when the
+ * timer was stopped and the GPU is now being started again.
+ */
+ mali_dvfs_policy_new_period();
+ }
+#endif
+
+ /*
+ * Use a short interval for the first timeout to save power:
+ * the new period starts at full power, so if the job load is
+ * light and finishes within 10ms, keeping the high frequency
+ * for the rest of a long period would only waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+ } else {
+ /* Nothing to do */
+ mali_utilization_data_unlock();
+ }
+}
+
+void mali_utilization_gp_end(void)
+{
+ mali_utilization_data_lock();
+
+ --num_running_gp_cores;
+ if (0 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last GP core ended, consider GP idle from now and onwards */
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = 0;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ mali_utilization_data_unlock();
+}
+
+void mali_utilization_pp_end(void)
+{
+ mali_utilization_data_lock();
+
+ --num_running_pp_cores;
+ if (0 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last PP core ended, consider PP idle from now and onwards */
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = 0;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ mali_utilization_data_unlock();
+}
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+ return mali_dvfs_policy_enabled();
+#else
+ return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+ MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+ mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(utilization_data_lock);
+}
+
+void mali_utilization_data_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock);
+}
+
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+ return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+ return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+ return last_utilization_pp;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h
new file mode 100644
index 000000000000..06f585dcb238
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+mali_bool mali_utilization_enabled(void);
+
+/**
+ * Should be called when a GP job is about to start executing
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a GP job has completed executing
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a PP job is about to start executing
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a PP job has completed executing
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to calculate the GPU utilization
+ */
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_data_assert_locked(void);
+
+void mali_utilization_reset(void);
+
+
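+/*
+ * Example platform handler (illustrative sketch only). The callback is the
+ * utilization_callback fetched through _mali_osk_device_data_get() in
+ * mali_utilization_init(), and the field names below match the assignments
+ * made in mali_kernel_utilization.c. The reported values are in the range
+ * 0..256, where 256 means 100% busy over the last control timer period
+ * (205/256 is roughly 80%, 64/256 is 25%). The example_request_* helpers
+ * are hypothetical.
+ *
+ *   static void example_gpu_utilization_handler(struct mali_gpu_utilization_data *data)
+ *   {
+ *           if (data->utilization_gpu > 205)
+ *                   example_request_higher_gpu_freq();
+ *           else if (data->utilization_gpu < 64)
+ *                   example_request_lower_gpu_freq();
+ *   }
+ */
+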
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c
new file mode 100644
index 000000000000..dd44e5e7fa03
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#include "mali_osk_profiling.h"
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.c b/drivers/gpu/arm/utgard/common/mali_l2_cache.c
new file mode 100644
index 000000000000..fe33f561b2aa
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /*unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018,
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the values that can be written to the Mali L2 cache ENABLE register
+ */
+typedef enum mali_l2_cache_enable {
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02,
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_NOT_SET -1
+
+static struct mali_l2_cache_core *
+ mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
+
+
+/* Local helper functions */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_lock(cache->lock);
+}
+
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_unlock(cache->lock);
+}
+
+/* Implementation of the L2 cache interface */
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index)
+{
+ struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+ u32 cache_size;
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+ resource->description));
+
+ if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL == cache) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
+ }
+
+ cache->core_id = mali_global_num_l2s;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_value0_base = 0;
+ cache->counter_value1_base = 0;
+ cache->pm_domain = NULL;
+ cache->power_is_on = MALI_FALSE;
+ cache->last_invalidated_id = 0;
+
+ if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+ resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ _mali_osk_free(cache);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ cache_size = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_SIZE);
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+#endif
+
+ cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_L2);
+ if (NULL == cache->lock) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+ cache->hw_core.description));
+ mali_hw_core_delete(&cache->hw_core);
+ _mali_osk_free(cache);
+ return NULL;
+ }
+
+ /* register with correct power domain */
+ cache->pm_domain = mali_pm_register_l2_cache(
+ domain_index, cache);
+
+ mali_global_l2s[mali_global_num_l2s] = cache;
+ mali_global_num_l2s++;
+
+ return cache;
+}
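+
+/*
+ * Example decode of the SIZE register read in the debug build above, using a
+ * hypothetical register value of 0x06120206 (the field layout follows the
+ * debug print, not a datasheet):
+ *
+ *   size          = 1 << (((0x06120206 >> 16) & 0xff) - 10) = 1 << 8 = 256K
+ *   associativity = 1 << ((0x06120206 >> 8) & 0xff)         = 1 << 2 = 4-way
+ *   line length   = 1 << (0x06120206 & 0xff)                = 1 << 6 = 64 bytes
+ *   external bus  = 1 << ((0x06120206 >> 24) & 0xff)        = 1 << 6 = 64 bits
+ */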
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ if (mali_global_l2s[i] != cache) {
+ continue;
+ }
+
+ mali_global_l2s[i] = NULL;
+ mali_global_num_l2s--;
+
+ if (i == mali_global_num_l2s) {
+ /* Removed last element, nothing more to do */
+ break;
+ }
+
+ /*
+ * We removed an L2 cache from the middle of the array,
+ * so move the last L2 cache to the current position
+ */
+ mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+ mali_global_l2s[mali_global_num_l2s] = NULL;
+
+ /* All good */
+ break;
+ }
+
+ _mali_osk_spinlock_irq_term(cache->lock);
+ mali_hw_core_delete(&cache->hw_core);
+ _mali_osk_free(cache);
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ mali_l2_cache_reset(cache);
+
+ if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask)
+ MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+ cache->power_is_on = MALI_TRUE;
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
+
+ /*
+ * The HW counters will start from zero again when we resume,
+ * but we should report counters as always increasing.
+ * Take a copy of the HW values now in order to add this to
+ * the values we report after being powered up.
+ *
+ * The physical power off of the L2 cache might be outside our
+ * own control (e.g. runtime PM). That is why we must manually
+ * set the counter value to zero as well.
+ */
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value0_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value1_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+
+ cache->power_is_on = MALI_FALSE;
+
+ mali_l2_cache_unlock(cache);
+}
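+
+/*
+ * Illustration of the base-offset bookkeeping above (made-up numbers):
+ * suppose PERFCNT_VAL0 reads 1000 when the L2 cache is powered down. Then
+ * counter_value0_base becomes 1000 and the HW register is cleared. After
+ * power-up the hardware counts from 0 again, so a later read of 250 is
+ * reported as 250 + 1000 = 1250 by mali_l2_cache_core_get_counter_values(),
+ * i.e. the counter appears monotonically increasing across power cycles.
+ */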
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+ u32 reg_offset_src;
+ u32 reg_offset_val;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
+
+ mali_l2_cache_lock(cache);
+
+ if (0 == source_id) {
+ /* start counting from 0 */
+ cache->counter_value0_base = 0;
+ cache->counter_src0 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+ } else {
+ /* start counting from 0 */
+ cache->counter_value1_base = 0;
+ cache->counter_src1 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+ }
+
+ if (cache->power_is_on) {
+ u32 hw_src;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ hw_src = counter;
+ } else {
+ hw_src = 0; /* disable value for HW */
+ }
+
+ /* Set counter src */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_src, hw_src);
+
+ /* Make sure the HW starts counting from 0 again */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_val, 0);
+ }
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ mali_l2_cache_lock(cache);
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ } else {
+ *value0 = 0;
+ }
+
+ /* Add base offset value (in case we have been powered off) */
+ *value0 += cache->counter_value0_base;
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ } else {
+ *value1 = 0;
+ }
+
+ /* Add base offset value (in case we have been powered off) */
+ *value1 += cache->counter_value1_base;
+ }
+
+ mali_l2_cache_unlock(cache);
+}
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (mali_global_num_l2s > index) {
+ return mali_global_l2s[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+ return mali_global_num_l2s;
+}
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL == cache) {
+ return;
+ }
+
+ mali_l2_cache_lock(cache);
+
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL == cache) {
+ return;
+ }
+
+ /*
+ * If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started.
+ */
+
+ mali_l2_cache_lock(cache);
+
+ if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+ /* Set latest invalidated id to current "point in time" */
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+
+ mali_l2_cache_unlock(cache);
+}
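+
+/*
+ * Example of the conditional invalidate above (made-up ids): if job #7 has
+ * cache order id 41 and a full invalidate issued on behalf of a newer job
+ * stored last_invalidated_id = 42, then a later call with id = 41 sees
+ * 41 > 42 evaluate false and sends no CLEAR_ALL command; job #7's memory
+ * was already written before the newer job started, so the newer job's
+ * flush also covered it.
+ */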
+
+void mali_l2_cache_invalidate_all(void)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+ }
+
+ mali_l2_cache_unlock(cache);
+ }
+}
+
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ u32 j;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ for (j = 0; j < num_pages; j++) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+ pages[j]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
+ }
+ }
+
+ mali_l2_cache_unlock(cache);
+ }
+}
+
+/* -------- local helper functions below -------- */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_ENABLE,
+ (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+ (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+ if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_MAX_READS,
+ (u32)mali_l2_max_reads);
+ }
+
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+ cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+ cache->counter_src1);
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+ /*
+ * First, wait for L2 cache command handler to go idle.
+ * (Commands received while processing another command will be ignored)
+ */
+ for (i = 0; i < loop_count; i++) {
+ if (!(mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_STATUS) &
+ (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+ break;
+ }
+ }
+
+ if (i == loop_count) {
+ MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* then issue the command */
+ mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.h b/drivers/gpu/arm/utgard/common/mali_l2_cache.h
new file mode 100644
index 000000000000..c48a8844075f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core {
+ /* Common HW core functionality */
+ struct mali_hw_core hw_core;
+
+ /* Synchronize L2 cache access */
+ _mali_osk_spinlock_irq_t *lock;
+
+ /* Unique core ID */
+ u32 core_id;
+
+ /* The power domain this L2 cache belongs to */
+ struct mali_pm_domain *pm_domain;
+
+ /* MALI_TRUE if power is on for this L2 cache */
+ mali_bool power_is_on;
+
+ /* A "timestamp" to avoid unnecessary flushes */
+ u32 last_invalidated_id;
+
+ /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0;
+
+ /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1;
+
+ /*
+ * Performance counter 0 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value0_base;
+
+ /*
+ * Performance counter 1 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value1_base;
+
+ /* Used by PM domains to link L2 caches of same domain */
+ _mali_osk_list_t pm_domain_list;
+};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->core_id;
+}
+
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+struct mali_group *mali_l2_cache_get_group(
+ struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id);
+
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.c b/drivers/gpu/arm/utgard/common/mali_mem_validation.c
new file mode 100644
index 000000000000..5ae5cdd5592a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Check restrictions on page alignment */
+ if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+ (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_mem_validator.phys_base = start;
+ mali_mem_validator.size = size;
+ MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+ mali_mem_validator.phys_base, mali_mem_validator.size));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+ if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+ if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
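+
+/*
+ * Worked example (hypothetical addresses, assuming 4 KiB CPU pages): with a
+ * validator installed for phys_base = 0x80000000 and size = 0x01000000
+ * (16 MiB), checking phys_addr = 0x80100000 with size = 0x2000 succeeds:
+ * both values are page aligned, there is no overflow, and both the first
+ * byte and the last byte (0x80101FFF) fall inside 0x80000000..0x80FFFFFF.
+ * Checking 0x80FFF000 with size 0x2000 fails, because its last byte
+ * 0x81000FFF lies outside that range.
+ */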
diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.h b/drivers/gpu/arm/utgard/common/mali_mem_validation.h
new file mode 100644
index 000000000000..05013f46f901
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.c b/drivers/gpu/arm/utgard/common/mali_mmu.c
new file mode 100644
index 000000000000..b82486fa66c0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command {
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+ /* allocate the helper pages */
+ mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
+ if (0 == mali_empty_page_directory_phys) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping)) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+ /* Free global helper pages */
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+
+ /* Free the page fault flush pages */
+ mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping);
+}
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+ struct mali_mmu_core *mmu = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+ mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+ if (NULL != mmu) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
+ if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
+ if (is_virtual) {
+ /* Skip reset and IRQ setup for virtual MMU */
+ return mmu;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ mmu->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_mmu,
+ group,
+ mali_mmu_probe_trigger,
+ mali_mmu_probe_ack,
+ mmu,
+ resource->description);
+ if (NULL != mmu->irq) {
+ return mmu;
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+ }
+ }
+ mali_group_remove_mmu_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+ }
+ mali_hw_core_delete(&mmu->hw_core);
+ }
+
+ _mali_osk_free(mmu);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+ }
+
+ return NULL;
+}
+
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+ if (NULL != mmu->irq) {
+ _mali_osk_irq_term(mmu->irq);
+ }
+
+ mali_hw_core_delete(&mmu->hw_core);
+ _mali_osk_free(mmu);
+}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+ return MALI_TRUE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+ return MALI_FALSE;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+ break;
+ }
+ if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return MALI_FALSE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+ return;
+ }
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+ return;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+ break;
+ }
+ if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+ int i;
+
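+ /*
+ * Sanity-check the register interface before issuing the hard reset: the
+ * page directory address is 4 KiB aligned, so the low 12 bits of DTE_ADDR
+ * read back as zero and writing 0xCAFEBABE is expected to return 0xCAFEB000.
+ */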
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+ MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ if (!stall_success) {
+ err = _MALI_OSK_ERR_BUSY;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* no session is active, so just activate the empty page directory */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
+ mali_mmu_enable_paging(mmu);
+ err = _MALI_OSK_ERR_OK;
+ }
+ mali_mmu_disable_stall(mmu);
+
+ return err;
+}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+ if (MALI_FALSE == stall_success) {
+ /* MALI_FALSE means the MMU is in page fault state; disable_stall cannot be issued in that case */
+ return MALI_FALSE;
+ }
+
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+ /* The MMU must be in stalled or page fault mode for this write to work */
+ MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+ & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+ mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ /* This function can only be called when the core is idle, so it cannot fail. */
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+ /* The stall request is expected to fail here, since the MMU may be in page fault mode when this function is called */
+ mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+ if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.h b/drivers/gpu/arm/utgard/common/mali_mmu.h
new file mode 100644
index 000000000000..6ed48585f3d2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits {
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+ MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
new file mode 100644
index 000000000000..9ad3e8970b7d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+ mali_dma_addr address;
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+ /* Allocation failed */
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_mmu_release_table_page(address, mapping);
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+ return 0;
+ }
+
+ *virt_addr = mapping;
+ return address;
+}
+
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
+{
+ if (MALI_INVALID_PAGE != address) {
+ mali_mmu_release_table_page(address, virt_addr);
+ }
+}
+
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(data_page, data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_table, page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ fill_page(*data_page_mapping, 0);
+ fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+ fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ }
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ if (MALI_INVALID_PAGE != *page_directory) {
+ mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+ *page_directory = MALI_INVALID_PAGE;
+ *page_directory_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *page_table) {
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ *page_table_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *data_page) {
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ *data_page_mapping = NULL;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+ _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ _mali_osk_errcode_t err;
+ mali_io_address pde_mapping;
+ mali_dma_addr pde_phys;
+ int i, page_count;
+ u32 start_address;
+ if (last_pde < first_pde)
+ return _MALI_OSK_ERR_INVALID_ARGS;
+
+ for (i = first_pde; i <= last_pde; i++) {
+ if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+ /* Page table not present */
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+ return err;
+ }
+ pagedir->page_entries_mapped[i] = pde_mapping;
+
+ /* Update PDE, mark as present */
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+ pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ }
+
+ if (first_pde == last_pde) {
+ pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
+ } else if (i == first_pde) {
+ start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+ page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
+ pagedir->page_entries_usage_count[i] += page_count;
+ } else if (i == last_pde) {
+ start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+ page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
+ pagedir->page_entries_usage_count[i] += page_count;
+ } else {
+ pagedir->page_entries_usage_count[i] = 1024;
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ return _MALI_OSK_ERR_OK;
+}
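+
+/*
+ * Worked example of the usage-count arithmetic above (editorial illustration
+ * only, using the constants from mali_mmu_page_directory.h: 4 KiB pages,
+ * 4 MiB of virtual space per page table):
+ *
+ *   mali_mmu_pagedir_map(pagedir, 0x003FE000, 0x5000) spans two PDEs:
+ *     first_pde = 0x003FE000 >> 22                 = 0
+ *     last_pde  = (0x003FE000 + 0x5000 - 1) >> 22  = 1
+ *   For i == first_pde: page_count = (0x400000 - 0x3FE000) / 0x1000 = 2
+ *   For i == last_pde:  page_count = (0x403000 - 0x400000) / 0x1000 = 3
+ *   so the five mapped pages are split 2/3 between the two usage counts.
+ */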
+
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+ int i;
+ const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+ for (i = first_pte; i <= last_pte; i++) {
+ _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+ }
+}
+
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ u32 left = size;
+ int i;
+ mali_bool pd_changed = MALI_FALSE;
+ u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+ u32 num_pages_inv = 0;
+ mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+ /* For all page directory entries in range. */
+ for (i = first_pde; i <= last_pde; i++) {
+ u32 size_in_pde, offset;
+
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+ /* Offset into page table, 0 if mali_address is 4MiB aligned */
+ offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+ if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+ size_in_pde = left;
+ } else {
+ size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+ }
+
+ pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;
+
+ /* If entire page table is unused, free it */
+ if (0 == pagedir->page_entries_usage_count[i]) {
+ u32 page_phys;
+ void *page_virt;
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+
+ page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
+ page_virt = pagedir->page_entries_mapped[i];
+ pagedir->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+
+ mali_mmu_release_table_page(page_phys, page_virt);
+ pd_changed = MALI_TRUE;
+ } else {
+ MALI_DEBUG_ASSERT(num_pages_inv < 2);
+ if (num_pages_inv < 2) {
+ pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+
+ /* If part of the page table is still in use, zero the relevant PTEs */
+ mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+ }
+
+ left -= size_in_pde;
+ mali_address += size_in_pde;
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* L2 pages invalidation */
+ if (MALI_TRUE == pd_changed) {
+ MALI_DEBUG_ASSERT(num_pages_inv < 3);
+ if (num_pages_inv < 3) {
+ pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+ }
+
+ if (invalidate_all) {
+ mali_l2_cache_invalidate_all();
+ } else {
+ mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+ }
+
+ MALI_SUCCESS;
+}
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+ struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ mali_dma_addr phys;
+
+ pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+ if (NULL == pagedir) {
+ return NULL;
+ }
+
+ err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+ if (_MALI_OSK_ERR_OK != err) {
+ _mali_osk_free(pagedir);
+ return NULL;
+ }
+
+ pagedir->page_directory = (u32)phys;
+
+ /* Zero page directory */
+ fill_page(pagedir->page_directory_mapped, 0);
+
+ return pagedir;
+}
+
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+ const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+ int i;
+
+ /* Free referenced page tables and zero PDEs. */
+ for (i = 0; i < num_page_table_entries; i++) {
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+ pagedir->page_directory_mapped,
+ sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+ mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+ mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Free the page directory page. */
+ mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+ _mali_osk_free(pagedir);
+}
+
+
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits)
+{
+ u32 end_address = mali_address + size;
+ u32 mali_phys = (u32)phys_address;
+
+ /* Map physical pages into MMU page tables */
+ for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+ MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+ mali_phys | permission_bits);
+ }
+}
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
+{
+#if defined(DEBUG)
+ u32 pde_index, pte_index;
+ u32 pde, pte;
+
+ pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+ pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+ pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ pde_index * sizeof(u32));
+
+
+ if (pde & MALI_MMU_FLAGS_PRESENT) {
+ u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+ pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+ pte_index * sizeof(u32));
+
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+ "\t\tPTE: %08x, page %08x is %s\n",
+ fault_addr, pte_addr, pte,
+ MALI_MMU_ENTRY_ADDRESS(pte),
+ pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+ } else {
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+ fault_addr, pde));
+ }
+#else
+ MALI_IGNORE(pagedir);
+ MALI_IGNORE(fault_addr);
+#endif
+}
+
+/* For instrumented */
+struct dump_info {
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info) {
+ info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
+
+ if (NULL != info->buffer) {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32) * 2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
+{
+ if (NULL != info) {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer) {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != pagedir->page_directory_mapped) {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+ );
+
+ for (i = 0; i < 1024; i++) {
+ if (NULL != pagedir->page_entries_mapped[i]) {
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+ _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+ "set the page directory address", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ info.buffer_left = args->size;
+ info.buffer = (u32 *)(uintptr_t)args->buffer;
+
+ args->register_writes = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+ args->page_table_dump = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
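+
+#if 0
+/*
+ * Editorial sketch, not part of the driver: one way a consumer of the dump
+ * produced above could walk the two regions. register_writes holds
+ * (register offset, value) u32 pairs emitted by writereg(), and
+ * page_table_dump holds records of one u32 physical address followed by one
+ * 4 KiB page image, as emitted by mali_mmu_dump_page(). The function name is
+ * hypothetical.
+ */
+static void example_walk_mmu_dump(const u32 *reg_writes, u32 reg_writes_size,
+ const u32 *pt_dump, u32 pt_dump_size)
+{
+ u32 i;
+
+ /* Each register write is two u32 words: register offset, then value. */
+ for (i = 0; i < reg_writes_size / sizeof(u32); i += 2) {
+ MALI_DEBUG_PRINT(3, ("MMU reg 0x%08x <- 0x%08x\n", reg_writes[i], reg_writes[i + 1]));
+ }
+
+ /* Each page record is one u32 address plus 1024 u32 words of page data. */
+ for (i = 0; i < pt_dump_size / sizeof(u32); i += 1 + MALI_MMU_PAGE_SIZE / 4) {
+ MALI_DEBUG_PRINT(3, ("page table page at physical address 0x%08x\n", pt_dump[i]));
+ }
+}
+#endif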
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h
new file mode 100644
index 000000000000..3fdf07210259
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
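+
+/*
+ * Example (editorial illustration): the Mali virtual address 0x00C01000
+ * decomposes as
+ *   MALI_MMU_PDE_ENTRY(0x00C01000) = 0x00C01000 >> 22           = 3
+ *   MALI_MMU_PTE_ENTRY(0x00C01000) = (0x00C01000 >> 12) & 0x3FF = 1
+ *   page offset                    = 0x00C01000 & 0xFFF         = 0
+ * i.e. the address is covered by page table 3, entry 1, at offset 0.
+ */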
+
+/**
+ * Extract the memory address from a PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page table entry flags.
+ */
+typedef enum mali_mmu_entry_flags {
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+ MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+ MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+ MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+ MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+ MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory, mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks the usage count of the page table pages, so they can be released when the last reference goes away */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits);
+
+u32 mali_allocate_empty_page(mali_io_address *virtual);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk.h b/drivers/gpu/arm/utgard/common/mali_osk.h
new file mode 100644
index 000000000000..a5017789c97e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk.h
@@ -0,0 +1,1389 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include <linux/seq_file.h>
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h" /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+#define _mali_osk_ctxprintf seq_printf
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * whenever \a ptr points to an object of type \a type.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
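+
+/*
+ * Usage sketch (editorial illustration; struct wrapper and its members are
+ * hypothetical):
+ *
+ *   struct wrapper {
+ *           int foo;
+ *           int member;
+ *   };
+ *
+ *   int *p = &some_wrapper->member;
+ *   struct wrapper *w = _MALI_OSK_CONTAINER_OF(p, struct wrapper, member);
+ *
+ * w now points back at the enclosing struct wrapper object.
+ */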
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We do signal a lot of other (low priority) work also as part of the job being finished, and if we
+ * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs
+ * random things instead of starting the next GPU job when the GPU is idle. So setting the gpu
+ * scheduler to high priority does give a visually more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
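+
+/*
+ * Typical lifecycle sketch (editorial illustration; my_handler and my_data
+ * are hypothetical caller-side names). Scheduling causes a deferred call of
+ * my_handler(my_data); deleting the work flushes the queue first:
+ *
+ *   _mali_osk_wq_work_t *w = _mali_osk_wq_create_work(my_handler, my_data);
+ *   if (NULL != w) {
+ *           _mali_osk_wq_schedule_work(w);
+ *           ...
+ *           _mali_osk_wq_delete_work(w);
+ *   }
+ */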
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * This function is the same as \a _mali_osk_wq_schedule_work(), with the only
+ * difference that the work runs at a high (real-time) priority on the system.
+ *
+ * It should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+*/
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer will be started and the \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to an ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ */
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new val to atomic counter, and return the old atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_vfree().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_vfree().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSes bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation, which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
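+
+/*
+ * Intended write pattern (editorial illustration): batch several relaxed
+ * writes and order them with a single write barrier, as the MMU page
+ * directory code does, rather than paying a barrier per access:
+ *
+ *   _mali_osk_mem_iowrite32_relaxed(mapping, 0 * sizeof(u32), val0);
+ *   _mali_osk_mem_iowrite32_relaxed(mapping, 1 * sizeof(u32), val1);
+ *   _mali_osk_write_mem_barrier();
+ */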
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Asynchronous events are communicated to user space through a synchronous
+ * call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for
+ * delivery to user space, and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
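+
+/*
+ * Producer/consumer sketch (editorial illustration; MY_TYPE and struct
+ * my_payload are hypothetical):
+ *
+ *   producer:
+ *     _mali_osk_notification_t *n =
+ *             _mali_osk_notification_create(MY_TYPE, sizeof(struct my_payload));
+ *     if (NULL != n) {
+ *             ... fill in n->result_buffer ...
+ *             _mali_osk_notification_queue_send(queue, n);
+ *     }
+ *
+ *   consumer:
+ *     _mali_osk_notification_t *out = NULL;
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive(queue, &out)) {
+ *             ... consume out->result_buffer ...
+ *             _mali_osk_notification_delete(out);
+ *     }
+ */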
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0, the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks, measured from the time
+ * of the call, at which this timer should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
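+/* Illustrative timer lifecycle (a sketch; my_timeout_handler is a hypothetical
+ * callback and the 100 ms period is arbitrary):
+ *
+ * @code
+ * static void my_timeout_handler(void *data)
+ * {
+ *         // Runs on expiry, possibly in IRQ context; defer heavy work.
+ * }
+ *
+ * _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ * if (NULL != tim) {
+ *         _mali_osk_timer_setcallback(tim, my_timeout_handler, NULL);
+ *         _mali_osk_timer_add(tim, _mali_osk_time_mstoticks(100));
+ *         ...
+ *         _mali_osk_timer_del(tim);
+ *         _mali_osk_timer_term(tim);
+ * }
+ * @endcode
+ */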
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time uses the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be lossless,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after or at the same time as tickb
+ *
+ * Systems where the tick count can wrap must handle wrap-around correctly.
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
+ */
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+unsigned long _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+unsigned long _mali_osk_time_tickcount(void);
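+/* Illustrative timeout check built from the helpers above (a sketch;
+ * work_done() is hypothetical and the 500 ms budget is arbitrary):
+ *
+ * @code
+ * unsigned long deadline = _mali_osk_time_tickcount() +
+ *                          _mali_osk_time_mstoticks(500);
+ *
+ * while (MALI_FALSE == work_done()) {
+ *         if (_mali_osk_time_after_eq(_mali_osk_time_tickcount(), deadline)) {
+ *                 break; // timed out
+ *         }
+ * }
+ * @endcode
+ */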
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay has microsecond resolution, and is necessary for correct
+ * operation of the driver. The delay will be \b at least \a usecs
+ * microseconds, and may be (significantly) longer.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where a thread may run in a context in which it must
+ * not sleep, this is implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return time in nanoseconds, measured from an arbitrary reference point.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+/** @brief Return time in nanoseconds, since boot time.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
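+/* Worked example (a sketch): for val = 0x00000010 only bit 4 is set, so bits
+ * 31..5 are zero and _mali_osk_clz(val) returns 27. For val = 0, all 32 bits
+ * are leading zeros and the function is expected to return 32.
+ */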
+
+/** @brief Find the last (most-significant) bit set
+ *
+ * @param val 32-bit word to find the last set bit in
+ * @return the last (most-significant) bit set.
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. The call returns once
+ * the timeout expires, even if the condition is still false.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
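+/* Illustrative wait-queue usage (a sketch; struct my_state, my_flag_is_set,
+ * and the queue/state objects are hypothetical and assumed to be set up
+ * elsewhere):
+ *
+ * @code
+ * struct my_state { mali_bool flag; };
+ *
+ * static mali_bool my_flag_is_set(void *data)
+ * {
+ *         struct my_state *state = data;
+ *         return state->flag;
+ * }
+ *
+ * // Waiting side:
+ * _mali_osk_wait_queue_wait_event(queue, my_flag_is_set, state);
+ *
+ * // Signalling side, after setting state->flag = MALI_TRUE:
+ * _mali_osk_wait_queue_wake_up(queue);
+ * @endcode
+ */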
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the calling process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return the name of the calling process.
+ *
+ * @return Name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
+ *
+ * When the function returns successfully, Mali is powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
+
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
+ *
+ * Mali might not yet be powered on when this function returns.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
+
+/** @brief Release the reference to the external power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores may be powered off.
+ *
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
+ */
+void _mali_osk_pm_dev_ref_put(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
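+/* Illustrative power-reference pattern (a sketch): take an asynchronous
+ * reference, wait for power-up, access the hardware, then drop the reference.
+ *
+ * @code
+ * if (_MALI_OSK_ERR_OK == _mali_osk_pm_dev_ref_get_async()) {
+ *         _mali_osk_pm_dev_barrier();   // wait until Mali is powered on
+ *         ...                           // talk to the hardware
+ *         _mali_osk_pm_dev_ref_put();
+ * }
+ * @endcode
+ */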
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_bitmap OSK Bitmap
+ * @{ */
+
+/** @brief Allocate a unique number from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return A unique number allocated from the bitmap object.
+ */
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Free a number back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj A number previously allocated from the bitmap object.
+ */
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj);
+
+/** @brief Allocate a contiguous range of numbers from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param cnt The number of contiguous values to allocate.
+ * @return The first number of the allocated contiguous block.
+ */
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt);
+
+/** @brief Free a contiguous block of numbers back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj The first number of the block.
+ * @param cnt The size of the contiguous block.
+ */
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt);
+
+/** @brief Return the number of values still available for allocation in the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return The number of values available for allocation.
+ */
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Initialize a bitmap object.
+ *
+ * @param bitmap A pointer to an uninitialized bitmap object.
+ * @param num Size of the bitmap object; this determines how much memory is allocated.
+ * @param reserve The first number available for allocation; lower numbers are reserved.
+ */
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve);
+
+/** @brief Free the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ */
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap);
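+/* Illustrative bitmap usage (a sketch; the sizes are arbitrary and a zero
+ * return from _mali_osk_bitmap_init() is assumed to indicate success):
+ *
+ * @code
+ * struct _mali_osk_bitmap ids;
+ *
+ * if (0 == _mali_osk_bitmap_init(&ids, 64, 1)) {
+ *         u32 id = _mali_osk_bitmap_alloc(&ids);
+ *         ...
+ *         _mali_osk_bitmap_free(&ids, id);
+ *         _mali_osk_bitmap_term(&ids);
+ * }
+ * @endcode
+ */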
+/** @} */ /* end group _mali_osk_bitmap */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_bitops.h b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h
new file mode 100644
index 000000000000..bb1831753a40
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz(isolated);
+
+ return 31 - leading_zeros;
+}
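+/* Worked example of the isolation trick above (a sketch): for value = 0x7
+ * (bits 0..2 set), inverted = 0xFFFFFFF8, negated = 0x00000008,
+ * isolated = 0x00000008, so _mali_osk_clz(isolated) = 28 and the function
+ * returns 31 - 28 = 3, the index of the first zero bit.
+ */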
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+ u32 total;
+
+ for (total = 0; total < maxbit; total += 32, ++addr) {
+ int result;
+ result = _mali_internal_find_first_zero_bit(*addr);
+
+ /* non-negative signifies the bit was found */
+ if (result >= 0) {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if (total >= maxbit) {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
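+/* Illustrative use of the operations above (a sketch): track 64 slots in two
+ * 32-bit words and claim the first free one.
+ *
+ * @code
+ * u32 slots[2] = { 0, 0 };
+ * u32 free_slot = _mali_osk_find_first_zero_bit(slots, 64);
+ *
+ * if (free_slot < 64) {
+ *         _mali_osk_set_nonatomic_bit(free_slot, slots);   // claim the slot
+ *         ...
+ *         _mali_osk_clear_nonatomic_bit(free_slot, slots); // release it
+ * }
+ * @endcode
+ */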
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_list.h b/drivers/gpu/arm/utgard/common/mali_osk_list.h
new file mode 100644
index 000000000000..9af2d7d4d621
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_list.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one whose head element points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+ if (!_mali_osk_list_empty(old_list)) {
+ new_list->next = old_list->next;
+ new_list->prev = old_list->prev;
+ new_list->next->prev = new_list;
+ new_list->prev->next = new_list;
+ old_list->next = old_list;
+ old_list->prev = old_list;
+ }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+
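+/* Illustrative list usage (a sketch; struct my_job and remove_job are
+ * hypothetical):
+ *
+ * @code
+ * struct my_job {
+ *         u32 id;
+ *         _mali_osk_list_t list;   // link into pending_jobs
+ * };
+ *
+ * _MALI_OSK_LIST_HEAD_STATIC_INIT(pending_jobs);
+ *
+ * static void remove_job(u32 id)
+ * {
+ *         struct my_job *job, *tmp;
+ *
+ *         _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &pending_jobs, struct my_job, list) {
+ *                 if (id == job->id) {
+ *                         _mali_osk_list_delinit(&job->list);
+ *                 }
+ *         }
+ * }
+ * @endcode
+ */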
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_mali.h b/drivers/gpu/arm/utgard/common/mali_osk_mali.h
new file mode 100644
index 000000000000..9c06f5aa9d11
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_mali.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device {
+ struct device *dev;
+#ifdef CONFIG_HAVE_CLK
+ struct clk *clock;
+#endif
+#ifdef CONFIG_REGULATOR
+ struct regulator *regulator;
+#endif
+#ifdef CONFIG_PM_DEVFREQ
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq *devfreq;
+ unsigned long current_freq;
+ unsigned long current_voltage;
+#ifdef CONFIG_DEVFREQ_THERMAL
+ struct thermal_cooling_device *devfreq_cooling;
+#endif
+#endif
+ struct mali_pm_metrics_data mali_metrics;
+};
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+/** @brief Initialize the device resources when using device tree
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
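+/* Illustrative lookup (a sketch; MY_UNIT_OFFSET is a placeholder for the
+ * address of the unit being probed):
+ *
+ * @code
+ * _mali_osk_resource_t res;
+ *
+ * if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MY_UNIT_OFFSET, &res)) {
+ *         // res.base and res.irq now describe the unit
+ * }
+ * @endcode
+ */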
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return The base address of the Mali GPU component with the lowest address if resources are found, otherwise 0.
+ */
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the specific GPU resource.
+ *
+ * @return value
+ * 0x400 if Mali 400 specific GPU resource identified
+ * 0x450 if Mali 450 specific GPU resource identified
+ * 0x470 if Mali 470 specific GPU resource identified
+ *
+ */
+u32 _mali_osk_identify_gpu_resource(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+
+/** @brief Find the pmu domain config from device data.
+ *
+ * @param domain_config_array used to store pmu domain config found in device data.
+ * @param array_size is the size of array domain_config_array.
+ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
+
+/** @brief Get Mali PMU switch delay
+ *
+ * @return PMU switch delay if it is configured
+ */
+u32 _mali_osk_get_pmu_switch_delay(void);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @brief Initialize the gpu secure mode.
+ * The gpu secure mode will initially be in a disabled state.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void);
+
+/** @brief Deinitialize the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void);
+
+/** @brief Reset GPU and enable the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void);
+
+/** @brief Reset GPU and disable the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void);
+
+/** @brief Check if the gpu secure mode has been enabled.
+ * @return MALI_TRUE if enabled, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_gpu_secure_mode_is_enabled(void);
+
+/** @brief Check if the gpu secure mode is supported.
+ * @return MALI_TRUE if supported, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_gpu_secure_mode_is_supported(void);
+
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_profiling.h b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h
new file mode 100644
index 000000000000..6e4583db1c80
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Stop the profile sampling operation.
+ */
+void _mali_osk_profiling_stop_sampling(u32 pid);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start at 0 and increment until this function fails, in order to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_types.h b/drivers/gpu/arm/utgard/common/mali_osk_types.h
new file mode 100644
index 000000000000..b6fa94ce16b3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_types.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char u8;
+typedef signed char s8;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+
+#define MALI_S32_MAX 0x7fffffff
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptible mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successfully acknowledged, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public to allow allocation on the stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+ union {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ * allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ * is not allowed.
+ *
+ */
+typedef enum {
+ /* || Locks || */
+ /* || must be || */
+ /* _||_ taken in _||_ */
+ /* \ / this \ / */
+ /* \/ order! \/ */
+
+ _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+ _MALI_OSK_LOCK_ORDER_EXECUTOR,
+ _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+ _MALI_OSK_LOCK_ORDER_PM_STATE,
+
+ _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+ _MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x2,
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks when we call
+ * functions _mali_osk_mutex_rw_init/wait/signal/term. In this case, the RO mode can
+ * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any are present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait()/_mali_osk_mutex_rw_signal().
+ *
+ */
+typedef enum {
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address *mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void *result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be acheived in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+ const char *description; /**< short description of the resource */
+ uintptr_t base; /**< Physical base address of the resource, as seen by Mali resources. */
+	const char *irq_name;           /**< Name of the IRQ belonging to this resource */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+/** @brief Mali print ctx type which uses seq_file
+ */
+typedef struct seq_file _mali_osk_print_ctx;
+
+#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1
+
+typedef struct _mali_osk_bitmap {
+ u32 reserve;
+ u32 last;
+ u32 max;
+ u32 avail;
+ _mali_osk_spinlock_t *lock;
+ unsigned long *table;
+} _mali_osk_bitmap_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pm.c b/drivers/gpu/arm/utgard/common/mali_pm.c
new file mode 100644
index 000000000000..3989a33aeaef
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm.c
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * Domains which are marked as powered on (protected by pm_lock_exec).
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1 << MALI_DOMAIN_INDEX_DUMMY
+};
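+/*
+ * Only the dummy domain has a default mask here; the real domains are
+ * filled in later, either from platform data or by HW probing in
+ * mali_pm_set_pmu_domain_config() / mali_pm_set_default_pm_domain_config().
+ */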
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store the masks for the different PP core counts: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel does not enable PM_RUNTIME, keep the flag always true,
+ * since the GPU will never be powered off by runtime PM. */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+ _mali_osk_errcode_t err;
+ struct mali_pmu_core *pmu;
+
+ pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_state) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_exec) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+ if (NULL == pm_work) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ /*
+ * We have a Mali PMU, set the correct domain
+ * configuration (default or custom)
+ */
+
+ u32 registered_cores_mask;
+
+ mali_pm_set_pmu_domain_config();
+
+ registered_cores_mask = mali_pm_get_registered_cores_mask();
+ mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ }
+
+ /* Create all power domains needed (at least one dummy domain) */
+ err = mali_pm_create_pm_domains();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_terminate();
+ return err;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+ if (NULL != pm_work) {
+ _mali_osk_wq_delete_work(pm_work);
+ pm_work = NULL;
+ }
+
+ mali_pm_domain_terminate();
+
+ if (NULL != pm_lock_exec) {
+ _mali_osk_mutex_term(pm_lock_exec);
+ pm_lock_exec = NULL;
+ }
+
+ if (NULL != pm_lock_state) {
+ _mali_osk_spinlock_irq_term(pm_lock_state);
+ pm_lock_state = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+ return domain; /* return the actual domain this was registered in */
+}
+
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_group(domain, group);
+
+ return domain; /* return the actual domain this was registered in */
+}
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains)
+{
+ mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+ if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+ /*
+ * Tell caller that the corresponding group
+ * was not already powered on.
+ */
+ ret = MALI_FALSE;
+ } else {
+ /*
+			 * There is a time gap between powering on the domain and
+			 * marking the corresponding groups as powered on.
+ */
+ if (NULL != groups[i] &&
+ MALI_FALSE == mali_group_power_is_on(groups[i])) {
+ ret = MALI_FALSE;
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
+
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains)
+{
+ u32 mask = 0;
+ mali_bool ret;
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ mask |= mali_pm_domain_ref_put(domains[i]);
+ }
+
+ if (0 == mask) {
+ /* return false, all domains should still stay on */
+ ret = MALI_FALSE;
+ } else {
+ /* Assert that we are dealing with a change */
+ MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+ /* Update our desired domain mask */
+ pd_mask_wanted &= ~mask;
+
+ /* return true; one or more domains can now be powered down */
+ ret = MALI_TRUE;
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
+
+void mali_pm_init_begin(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ _mali_osk_pm_dev_ref_get_sync();
+
+ /* Ensure all PMU domains are on */
+ if (NULL != pmu) {
+ mali_pmu_power_up_all(pmu);
+ }
+}
+
+void mali_pm_init_end(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ /* Ensure all PMU domains are off */
+ if (NULL != pmu) {
+ mali_pmu_power_down_all(pmu);
+ }
+
+ _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+ mali_pm_exec_lock();
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /*
+ * Only update if GPU is powered on.
+ * Deactivation of the last group will result in both a
+ * deferred runtime PM suspend operation and
+ * deferred execution of this function.
+ * mali_pm_runtime_active will be false if runtime PM
+ * executed first and thus the GPU is now fully powered off.
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+ _mali_osk_wq_schedule_work(pm_work);
+}
+
+void mali_pm_os_suspend(mali_bool os_suspend)
+{
+ int ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+
+ /* Suspend execution of all jobs, and go to inactive state */
+ mali_executor_suspend();
+
+ if (os_suspend) {
+ mali_control_timer_suspend(MALI_TRUE);
+ }
+
+ mali_pm_exec_lock();
+
+ ret = mali_pm_common_suspend();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+ MALI_IGNORE(ret);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ mali_pm_exec_lock();
+
+#if defined(DEBUG)
+ mali_pm_state_lock();
+
+ /* Assert that things are as we left them in os_suspend(). */
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+#endif
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /* Runtime PM was active, so reset PMU */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+ }
+
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ /* Start executing jobs again */
+ mali_executor_resume();
+}
+
+mali_bool mali_pm_runtime_suspend(void)
+{
+ mali_bool ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+
+ mali_pm_exec_lock();
+
+ /*
+ * Put SW state directly into "off" state, and do not bother to power
+ * down each power domain, because entire GPU will be powered off
+ * when we return.
+ * For runtime PM suspend, in contrast to OS suspend, there is a race
+ * between this function and the mali_pm_update_sync_internal(), which
+ * is fine...
+ */
+ ret = mali_pm_common_suspend();
+ if (MALI_TRUE == ret) {
+ mali_pm_runtime_active = MALI_FALSE;
+ } else {
+ /*
+ * Process the "power up" instead,
+ * which could have been "lost"
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ return ret;
+}
+
+void mali_pm_runtime_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ mali_pm_exec_lock();
+
+ mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+ ++num_pm_runtime_resume;
+
+ mali_pm_state_lock();
+
+ /*
+ * Assert that things are as we left them in runtime_suspend(),
+ * except for pd_mask_wanted which normally will be the reason we
+ * got here (job queued => domains wanted)
+ */
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ mali_pm_state_unlock();
+#endif
+
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
+ }
+
+ /*
+ * Normally we are resumed because a job has just been queued.
+ * pd_mask_wanted should thus be != 0.
+ * It is however possible for others to take a Mali Runtime PM ref
+ * without having a job queued.
+ * We should however always call mali_pm_update_sync_internal(),
+ * because this will take care of any potential mismatch between
+ * pmu_mask_current and pd_mask_current.
+ */
+ mali_pm_update_sync_internal();
+
+ mali_pm_exec_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPower domain: id %u\n",
+ mali_pm_domain_get_id(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tMask: 0x%04x\n",
+ mali_pm_domain_get_mask(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tUse count: %u\n",
+ mali_pm_domain_get_use_count(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tCurrent power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+ "On" : "Off");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tWanted power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+ "On" : "Off");
+
+ return n;
+}
+#endif
+
+static void mali_pm_state_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+ _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+ _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_up,
+ struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_up)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_up_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_up_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_up);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+ MALI_DEBUG_ASSERT(0 == *num_groups_up);
+ MALI_DEBUG_ASSERT_POINTER(l2_up);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+ MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering up domains: . [%s]\n",
+ mali_pm_mask_to_string(power_up_mask)));
+
+ pd_mask_current |= power_up_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(
+ domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered up */
+ mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+ /*
+ * Make a note of the L2 and/or group(s) to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(
+ domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_up <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_up[*num_l2_up] = l2_cache;
+ (*num_l2_up)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_up <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_up[*num_groups_up] = group;
+
+ (*num_groups_up)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+static void mali_pm_domain_power_down(u32 power_down_mask,
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_down,
+ struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_down)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_down_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_down_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_down);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+ MALI_DEBUG_ASSERT(0 == *num_groups_down);
+ MALI_DEBUG_ASSERT_POINTER(l2_down);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+ MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering down domains: [%s]\n",
+ mali_pm_mask_to_string(power_down_mask)));
+
+ pd_mask_current &= ~power_down_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered down */
+ mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+ /*
+ * Make a note of the L2s and/or groups to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_down <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_down[*num_l2_down] = l2_cache;
+ (*num_l2_down)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_down <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_down[*num_groups_down] = group;
+ (*num_groups_down)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+
+/*
+ * Execute pending power domain changes
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+ /*
+ * This should only be called in non-atomic context
+ * (normally as deferred work)
+ *
+ * Look at the pending power domain changes, and execute these.
+ * Make sure group and schedulers are notified about changes.
+ */
+
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ u32 power_down_mask;
+ u32 power_up_mask;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+ ++num_pm_updates;
+#endif
+
+ /* Hold PM state lock while we look at (and obey) the wanted state */
+ mali_pm_state_lock();
+
+ MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ /* Figure out which cores we need to power on */
+ power_up_mask = pd_mask_wanted &
+ (pd_mask_wanted ^ pd_mask_current);
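+	/*
+	 * Worked example with hypothetical masks: if pd_mask_wanted is 0x6
+	 * and pd_mask_current is 0x2, the XOR gives 0x4 and the AND keeps
+	 * only the wanted-but-not-yet-current bit, so power_up_mask is 0x4.
+	 */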
+
+ if (0 != power_up_mask) {
+ u32 power_up_mask_pmu;
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_up = 0;
+ struct mali_l2_cache_core *
+ l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_up = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_up;
+#endif
+
+ /*
+ * Make sure dummy/global domain is always included when
+ * powering up, since this is controlled by runtime PM,
+ * and device power is on at this stage.
+ */
+ power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* Power up only real PMU domains */
+ power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* But not those that happen to be powered on already */
+ power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+ power_up_mask;
+
+ if (0 != power_up_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current |= power_up_mask_pmu;
+ mali_pmu_power_up(pmu, power_up_mask_pmu);
+ }
+
+ /*
+ * Put the domains themselves in power up state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_up(power_up_mask,
+ groups_up, &num_groups_up,
+ l2_up, &num_l2_up);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+		/* Notify each L2 cache that it has been powered up */
+ for (i = 0; i < num_l2_up; i++) {
+ mali_l2_cache_power_up(l2_up[i]);
+ }
+
+ /*
+ * Tell execution module about all the groups we have
+ * powered up. Groups will be notified as a result of this.
+ */
+ mali_executor_group_power_up(groups_up, num_groups_up);
+
+ /* Lock state again before checking for power down */
+ mali_pm_state_lock();
+ }
+
+ /* Figure out which cores we need to power off */
+ power_down_mask = pd_mask_current &
+ (pd_mask_wanted ^ pd_mask_current);
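+	/*
+	 * Mirror of the power-up case (hypothetical masks): with pd_mask_wanted
+	 * 0x2 and pd_mask_current 0x6, power_down_mask becomes 0x4, i.e. only
+	 * the domain that is no longer wanted gets powered down.
+	 */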
+
+ /*
+ * Never power down the dummy/global domain here. This is to be done
+	 * from a suspend request (since this domain is only physically powered
+	 * down at that point).
+ */
+ power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ if (0 != power_down_mask) {
+ u32 power_down_mask_pmu;
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_down;
+#endif
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(power_down_mask,
+ groups_down, &num_groups_down,
+ l2_down, &num_l2_down);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+
+ }
+ } else {
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ u32 power_down_mask_pmu;
+
+ /* No need for state lock since we'll only update PMU */
+ mali_pm_state_unlock();
+
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+ }
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+}
+
+static mali_bool mali_pm_common_suspend(void)
+{
+ mali_pm_state_lock();
+
+ if (0 != pd_mask_wanted) {
+ MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+ mali_pm_state_unlock();
+ return MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ if (0 != pd_mask_current) {
+ /*
+		 * We still have some domains powered on.
+ * It is for instance very normal that at least the
+ * dummy/global domain is marked as powered on at this point.
+ * (because it is physically powered on until this function
+ * returns)
+ */
+
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(pd_mask_current,
+ groups_down,
+ &num_groups_down,
+ l2_down,
+ &num_l2_down);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ pmu_mask_current = 0;
+ } else {
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+ MALI_IGNORE(data);
+ mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+ int i;
+
+ /* Create all domains (including dummy domain) */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == domain_config[i]) continue;
+
+ if (NULL == mali_pm_domain_create(domain_config[i])) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+ MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+ /* GP core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_GP, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+ }
+
+ /* PP0 - PP3 core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP0, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP1, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP2, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP3, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1;
+ }
+ }
+
+ /* PP4 - PP7 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP4, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP5, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP6, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP7, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+ }
+
+ /* L2gp/L2PP0/L2PP4 */
+ if (mali_is_mali400()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI400_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+ }
+ } else if (mali_is_mali450()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE2, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+ }
+ } else if (mali_is_mali470()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI470_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0;
+ }
+ }
+}
+
+static u32 mali_pm_get_registered_cores_mask(void)
+{
+ int i = 0;
+ u32 mask = 0;
+
+ for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+ mask |= domain_config[i];
+ }
+
+ return mask;
+}
+
+static void mali_pm_set_pmu_domain_config(void)
+{
+ int i = 0;
+
+ _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (0 != domain_config[i]) {
+ MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n"));
+ break;
+ }
+ }
+
+ if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+ MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n"));
+ mali_pm_set_default_pm_domain_config();
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (domain_config[i]) {
+ MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i]));
+ }
+ }
+ /* Can't override dummy domain mask */
+ domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+ 1 << MALI_DOMAIN_INDEX_DUMMY;
+}
+
+#if defined(DEBUG)
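+/*
+ * Renders a domain mask as a fixed-width string, one character per domain
+ * with the highest domain index first ('X' = set, '-' = clear); e.g. a
+ * hypothetical mask 0x1005 renders as "X---------X-X". The shared static
+ * buffer is why callers must hold pm_lock_exec.
+ */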
+const char *mali_pm_mask_to_string(u32 mask)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+ int bit;
+ int str_pos = 0;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+ if (mask & (1 << bit)) {
+ bit_str[str_pos] = 'X';
+ } else {
+ bit_str[str_pos] = '-';
+ }
+ str_pos++;
+ }
+
+ bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+ return bit_str;
+}
+
+const char *mali_pm_group_stats_to_string(void)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+ u32 num_groups = mali_group_get_glob_num_groups();
+ u32 i;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(i);
+
+ if (MALI_TRUE == mali_group_power_is_on(group)) {
+ bit_str[i] = 'X';
+ } else {
+ bit_str[i] = '-';
+ }
+ }
+
+ bit_str[i] = '\0';
+
+ return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask
+ * cost is the total power cost of cores which will be powered on given this mask
+ */
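+/*
+ * Example (hypothetical Mali-450 style setup): a mask selecting GP, PP0 and
+ * one L2 cache gives cost = MALI_GP_COST + MALI_PP_COST + MALI_L2_COST = 10,
+ * and num_pp = 1 provided the L2 domain serving PP0 is also in the mask (or
+ * sits in the always-on dummy domain).
+ */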
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+ u32 i;
+
+ /* loop through all cores */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (!(domain_config[i] & mask)) {
+ continue;
+ }
+
+ switch (i) {
+ case MALI_DOMAIN_INDEX_GP:
+ *cost += MALI_GP_COST;
+
+ break;
+ case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP3:
+ if (mali_is_mali400()) {
+ if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L20])) {
+ *num_pp += 1;
+ }
+ } else {
+ if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L21])) {
+ *num_pp += 1;
+ }
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP7:
+ MALI_DEBUG_ASSERT(mali_is_mali450());
+
+ if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L22])) {
+ *num_pp += 1;
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_L20: /* Fall through */
+ case MALI_DOMAIN_INDEX_L21: /* Fall through */
+ case MALI_DOMAIN_INDEX_L22:
+ *cost += MALI_L2_COST;
+
+ break;
+ }
+ }
+}
+
+void mali_pm_power_cost_setup(void)
+{
+ /*
+ * Two parallel arrays which store the best domain mask and its cost
+	 * The index is the number of PP cores; e.g. index 0 is for the 1-PP
+	 * option, which might have mask 0x2 with a cost of 1. Lower cost is better.
+ */
+ u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+	/* cores_in_domain stores the total number of PP cores in each PM domain. */
+ u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	/* max_domain_mask / max_domain_id track the highest PMU domain in use. */
+ u32 max_domain_mask = 0;
+ u32 max_domain_id = 0;
+ u32 always_on_pp_cores = 0;
+
+ u32 num_pp, cost, mask;
+ u32 i, j , k;
+
+ /* Initialize statistics */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+ best_mask[i] = 0;
+ best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ mali_pm_domain_power_cost_result[i][j] = 0;
+ }
+ }
+
+	/* Calculate the number of PP cores for a given domain config. */
+ for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+ if (0 < domain_config[i]) {
+			/* Get the max domain mask value used to calculate the power
+			 * cost; always-on PP cores are not counted. */
+ if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+ && max_domain_mask < domain_config[i]) {
+ max_domain_mask = domain_config[i];
+ }
+
+ if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+ always_on_pp_cores++;
+ }
+ }
+ }
+ max_domain_id = _mali_osk_fls(max_domain_mask);
+
+ /*
+ * Try all combinations of power domains and check how many PP cores
+ * they have and their power cost.
+ */
+ for (mask = 0; mask < (1 << max_domain_id); mask++) {
+ num_pp = 0;
+ cost = 0;
+
+ mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+		/* This mask is usable for everything from MP1 up to num_pp PP cores; update the statistics for all of them */
+ for (i = 0; i < num_pp; i++) {
+ if (best_cost[i] >= cost) {
+ best_cost[i] = cost;
+ best_mask[i] = mask;
+ }
+ }
+ }
+
+ /*
+	 * If we want to enable x PP cores and x is less than the number of
+	 * always_on PP cores, then all of the PP cores we enable must be always_on cores.
+ */
+ for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+ if (i < always_on_pp_cores) {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = i + 1;
+ } else {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = always_on_pp_cores;
+ }
+ }
+
+	/* In this loop, i is the number of non-always-on PP cores we want to enable. */
+ for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+ if (best_mask[i] == 0) {
+ /* This MP variant is not available */
+ continue;
+ }
+
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ cores_in_domain[j] = 0;
+ }
+
+ for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+			if (0 < domain_config[j]
+			    && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) {
+ cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+ }
+ }
+
+		/* In this loop, j counts how many PP cores we have already assigned. */
+ for (j = 0; j <= i;) {
+			/* k visits every domain to consume the PP cores remaining in it. */
+ for (k = 0; k < max_domain_id; k++) {
+ /* If domain k in best_mask[i] is enabled and this domain has extra pp cores,
+ * we know we must pick at least one pp core from this domain.
+				 * Then we move on to the next enabled PM domain. */
+ if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+ cores_in_domain[k]--;
+ mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+ j++;
+ if (j > i) {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * When we are doing core scaling,
+ * this function is called to return the best mask to
+ * achieve the best pp group power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+ MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+ _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
+}
+
+u32 mali_pm_get_current_mask(void)
+{
+ return pd_mask_current;
+}
+
+u32 mali_pm_get_wanted_mask(void)
+{
+ return pd_mask_wanted;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pm.h b/drivers/gpu/arm/utgard/common/mali_pm.h
new file mode 100644
index 000000000000..dac69958e034
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+
+#define MALI_DOMAIN_INDEX_GP 0
+#define MALI_DOMAIN_INDEX_PP0 1
+#define MALI_DOMAIN_INDEX_PP1 2
+#define MALI_DOMAIN_INDEX_PP2 3
+#define MALI_DOMAIN_INDEX_PP3 4
+#define MALI_DOMAIN_INDEX_PP4 5
+#define MALI_DOMAIN_INDEX_PP5 6
+#define MALI_DOMAIN_INDEX_PP6 7
+#define MALI_DOMAIN_INDEX_PP7 8
+#define MALI_DOMAIN_INDEX_L20 9
+#define MALI_DOMAIN_INDEX_L21 10
+#define MALI_DOMAIN_INDEX_L22 11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU or always on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY 12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * PM module covers Mali PM core, PM domains and Mali PMU
+ */
+_mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
+void mali_pm_terminate(void);
+
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
+void mali_pm_os_resume(void);
+
+mali_bool mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
+
+u32 mali_pm_get_current_mask(void);
+u32 mali_pm_get_wanted_mask(void);
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.c b/drivers/gpu/arm/utgard/common/mali_pm_domain.c
new file mode 100644
index 000000000000..8290f7d88f6a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
+
+void mali_pm_domain_initialize(void)
+{
+ /* Domains will be initialized/created on demand */
+}
+
+void mali_pm_domain_terminate(void)
+{
+ int i;
+
+	/* Delete all domains that have been created */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ mali_pm_domains[i] = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+ struct mali_pm_domain *domain = NULL;
+ u32 domain_id = 0;
+
+ domain = mali_pm_domain_get_from_mask(pmu_mask);
+ if (NULL != domain) return domain;
+
+ MALI_DEBUG_PRINT(2,
+ ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+ pmu_mask));
+
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(
+ sizeof(struct mali_pm_domain));
+ if (NULL != domain) {
+ domain->power_is_on = MALI_FALSE;
+ domain->pmu_mask = pmu_mask;
+ domain->use_count = 0;
+ _mali_osk_list_init(&domain->group_list);
+ _mali_osk_list_init(&domain->l2_cache_list);
+
+ domain_id = _mali_osk_fls(pmu_mask) - 1;
+ /* Verify the domain_id */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+		/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+ mali_pm_domains[domain_id] = domain;
+
+ return domain;
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+ if (NULL == domain) {
+ return;
+ }
+
+ _mali_osk_list_delinit(&domain->group_list);
+ _mali_osk_list_delinit(&domain->l2_cache_list);
+
+ _mali_osk_free(domain);
+}
+
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ /*
+	 * Use addtail because the virtual group is created last and it needs
+	 * to be at the end of the list (in order to be activated after
+	 * all children).
+ */
+ _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
+}
+
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(l2_cache);
+ _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+ u32 id = 0;
+
+ if (0 == mask) {
+ return NULL;
+ }
+
+ id = _mali_osk_fls(mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << id) == mask);
+
+ return mali_pm_domains[id];
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+
+ return mali_pm_domains[id];
+}
+
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_get_async();
+ }
+
+ ++domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
+
+ /* Return our mask so caller can check this against wanted mask */
+ return domain->pmu_mask;
+}
+
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+
+ --domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
+
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_put();
+ }
+
+ /*
+	 * Return the PMU mask which could now be powered down
+ * (the bit for this domain).
+ * This is the responsibility of the caller (mali_pm)
+ */
+ return (0 == domain->use_count ? domain->pmu_mask : 0);
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
+{
+ u32 id = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
+
+ id = _mali_osk_fls(domain->pmu_mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+ /* Verify that we have stored the domain at right id/index */
+ MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
+
+ return id;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
+{
+ int i;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (NULL == mali_pm_domains[i]) {
+ /* Nothing to check */
+ continue;
+ }
+
+ if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
+
+ if (0 != mali_pm_domains[i]->use_count) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.h b/drivers/gpu/arm/utgard/common/mali_pm_domain.h
new file mode 100644
index 000000000000..5776abe39f3d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+/* Instances are protected by PM state lock */
+struct mali_pm_domain {
+ mali_bool power_is_on;
+ s32 use_count;
+ u32 pmu_mask;
+
+ /* Zero or more groups can belong to this domain */
+ _mali_osk_list_t group_list;
+
+ /* Zero or more L2 caches can belong to this domain */
+ _mali_osk_list_t l2_cache_list;
+};
+
+
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_add_l2_cache(
+ struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group);
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
+
+/* Ref counting */
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->power_is_on;
+}
+
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+ struct mali_pm_domain *domain,
+ mali_bool power_is_on)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ domain->power_is_on = power_is_on;
+}
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->use_count;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->pmu_mask;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_metrics.c b/drivers/gpu/arm/utgard/common/mali_pm_metrics.c
new file mode 100644
index 000000000000..cf74823230f7
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_metrics.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_pm_metrics.h"
+#include "mali_osk_locks.h"
+#include "mali_osk_mali.h"
+#include <linux/ktime.h>
+
+#define MALI_PM_TIME_SHIFT 0
+#define MALI_UTILIZATION_MAX_PERIOD 80000000 /* 80000000 ns = 80 ms */
+
+_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev)
+{
+ int i = 0;
+
+ MALI_DEBUG_ASSERT(mdev != NULL);
+
+ mdev->mali_metrics.time_period_start = ktime_get();
+ mdev->mali_metrics.time_period_start_gp = mdev->mali_metrics.time_period_start;
+ mdev->mali_metrics.time_period_start_pp = mdev->mali_metrics.time_period_start;
+
+ mdev->mali_metrics.time_busy = 0;
+ mdev->mali_metrics.time_idle = 0;
+ mdev->mali_metrics.prev_busy = 0;
+ mdev->mali_metrics.prev_idle = 0;
+ mdev->mali_metrics.num_running_gp_cores = 0;
+ mdev->mali_metrics.num_running_pp_cores = 0;
+ mdev->mali_metrics.time_busy_gp = 0;
+ mdev->mali_metrics.time_idle_gp = 0;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+ mdev->mali_metrics.time_busy_pp[i] = 0;
+ mdev->mali_metrics.time_idle_pp[i] = 0;
+ }
+ mdev->mali_metrics.gpu_active = MALI_FALSE;
+
+ mdev->mali_metrics.lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+ if (NULL == mdev->mali_metrics.lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_metrics_term(struct mali_device *mdev)
+{
+ _mali_osk_spinlock_irq_term(mdev->mali_metrics.lock);
+}
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+void mali_pm_record_job_status(struct mali_device *mdev)
+{
+ ktime_t now;
+ ktime_t diff;
+ u64 ns_time;
+
+ MALI_DEBUG_ASSERT(mdev != NULL);
+
+ now = ktime_get();
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+
+ ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_busy += ns_time;
+ mdev->mali_metrics.time_period_start = now;
+}
+
+void mali_pm_record_gpu_idle(mali_bool is_gp)
+{
+ ktime_t now;
+ ktime_t diff;
+ u64 ns_time;
+ struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+
+ MALI_DEBUG_ASSERT(mdev != NULL);
+
+ _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+ now = ktime_get();
+
+ if (MALI_TRUE == is_gp) {
+ --mdev->mali_metrics.num_running_gp_cores;
+ if (0 == mdev->mali_metrics.num_running_gp_cores) {
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp);
+ ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_busy_gp += ns_time;
+ mdev->mali_metrics.time_period_start_gp = now;
+
+ if (0 == mdev->mali_metrics.num_running_pp_cores) {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+ ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_busy += ns_time;
+ mdev->mali_metrics.time_period_start = now;
+ mdev->mali_metrics.gpu_active = MALI_FALSE;
+ }
+ }
+ } else {
+ --mdev->mali_metrics.num_running_pp_cores;
+ if (0 == mdev->mali_metrics.num_running_pp_cores) {
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp);
+ ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_busy_pp[0] += ns_time;
+ mdev->mali_metrics.time_period_start_pp = now;
+
+ if (0 == mdev->mali_metrics.num_running_gp_cores) {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+ ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_busy += ns_time;
+ mdev->mali_metrics.time_period_start = now;
+ mdev->mali_metrics.gpu_active = MALI_FALSE;
+ }
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_record_gpu_active(mali_bool is_gp)
+{
+ ktime_t now;
+ ktime_t diff;
+ struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+
+ MALI_DEBUG_ASSERT(mdev != NULL);
+
+ _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+ now = ktime_get();
+
+ if (MALI_TRUE == is_gp) {
+ mdev->mali_metrics.num_running_gp_cores++;
+ if (1 == mdev->mali_metrics.num_running_gp_cores) {
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp);
+ mdev->mali_metrics.time_idle_gp += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_period_start_gp = now;
+ if (0 == mdev->mali_metrics.num_running_pp_cores) {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE);
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+ mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_period_start = now;
+ mdev->mali_metrics.gpu_active = MALI_TRUE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+ }
+ } else {
+ mdev->mali_metrics.num_running_pp_cores++;
+ if (1 == mdev->mali_metrics.num_running_pp_cores) {
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp);
+ mdev->mali_metrics.time_idle_pp[0] += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_period_start_pp = now;
+ if (0 == mdev->mali_metrics.num_running_gp_cores) {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE);
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+ mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ mdev->mali_metrics.time_period_start = now;
+ mdev->mali_metrics.gpu_active = MALI_TRUE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
+
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+static void mali_pm_get_dvfs_utilisation_calc(struct mali_device *mdev, ktime_t now)
+{
+ ktime_t diff;
+
+ MALI_DEBUG_ASSERT(mdev != NULL);
+
+ diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+
+ if (mdev->mali_metrics.gpu_active) {
+ mdev->mali_metrics.time_busy += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ } else {
+ mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+ }
+}
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+static void mali_pm_reset_dvfs_utilisation_unlocked(struct mali_device *mdev, ktime_t now)
+{
+ /* Store previous value */
+ mdev->mali_metrics.prev_idle = mdev->mali_metrics.time_idle;
+ mdev->mali_metrics.prev_busy = mdev->mali_metrics.time_busy;
+
+ /* Reset current values */
+ mdev->mali_metrics.time_period_start = now;
+ mdev->mali_metrics.time_period_start_gp = now;
+ mdev->mali_metrics.time_period_start_pp = now;
+ mdev->mali_metrics.time_idle = 0;
+ mdev->mali_metrics.time_busy = 0;
+
+ mdev->mali_metrics.time_busy_gp = 0;
+ mdev->mali_metrics.time_idle_gp = 0;
+ mdev->mali_metrics.time_busy_pp[0] = 0;
+ mdev->mali_metrics.time_idle_pp[0] = 0;
+}
+
+void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev)
+{
+ _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+ mali_pm_reset_dvfs_utilisation_unlocked(mdev, ktime_get());
+ _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_get_dvfs_utilisation(struct mali_device *mdev,
+ unsigned long *total_out, unsigned long *busy_out)
+{
+ ktime_t now = ktime_get();
+ u64 busy = 0;
+ u64 total = 0;
+
+ _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+
+ mali_pm_get_dvfs_utilisation_calc(mdev, now);
+
+ busy = mdev->mali_metrics.time_busy;
+ total = busy + mdev->mali_metrics.time_idle;
+
+ /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default
+ * 100ms) */
+ if (total >= MALI_UTILIZATION_MAX_PERIOD) {
+ mali_pm_reset_dvfs_utilisation_unlocked(mdev, now);
+ } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
+ total += mdev->mali_metrics.prev_idle +
+ mdev->mali_metrics.prev_busy;
+ busy += mdev->mali_metrics.prev_busy;
+ }
+
+ *total_out = (unsigned long)total;
+ *busy_out = (unsigned long)busy;
+ _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
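+
+/*
+ * Usage sketch (illustrative only, not taken from this driver): a DVFS
+ * governor would typically turn the two outputs into a load percentage:
+ *
+ *     unsigned long total, busy;
+ *     mali_pm_get_dvfs_utilisation(mdev, &total, &busy);
+ *     unsigned long load = (0 != total) ? (busy * 100) / total : 0;
+ */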
+
+void mali_pm_metrics_spin_lock(void)
+{
+ struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+ _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_metrics_spin_unlock(void)
+{
+ struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+ _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_metrics.h b/drivers/gpu/arm/utgard/common/mali_pm_metrics.h
new file mode 100644
index 000000000000..2b136b0de4e3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_metrics.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_METRICS_H__
+#define __MALI_PM_METRICS_H__
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include "mali_osk_locks.h"
+#include "mali_group.h"
+
+struct mali_device;
+
+/**
+ * Metrics data collected for use by the power management framework.
+ */
+struct mali_pm_metrics_data {
+ ktime_t time_period_start;
+ u64 time_busy;
+ u64 time_idle;
+ u64 prev_busy;
+ u64 prev_idle;
+ u32 num_running_gp_cores;
+ u32 num_running_pp_cores;
+ ktime_t time_period_start_gp;
+ u64 time_busy_gp;
+ u64 time_idle_gp;
+ ktime_t time_period_start_pp;
+ u64 time_busy_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u64 time_idle_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ mali_bool gpu_active;
+ _mali_osk_spinlock_irq_t *lock;
+};
+
+/**
+ * Initialize/start the Mali GPU power management (pm_metrics) reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev);
+
+/**
+ * Terminate the Mali GPU power management (pm_metrics) reporting.
+ */
+void mali_pm_metrics_term(struct mali_device *mdev);
+
+/**
+ * Should be called when the GPU is about to start executing a job.
+ */
+void mali_pm_record_gpu_active(mali_bool is_gp);
+
+/**
+ * Should be called when a GPU job has finished.
+ */
+void mali_pm_record_gpu_idle(mali_bool is_gp);
+
+void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev);
+
+void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, unsigned long *total_out, unsigned long *busy_out);
+
+void mali_pm_metrics_spin_lock(void);
+
+void mali_pm_metrics_spin_unlock(void);
+#else
+static inline void mali_pm_record_gpu_idle(mali_bool is_gp) {}
+static inline void mali_pm_record_gpu_active(mali_bool is_gp) {}
+#endif
+#endif /* __MALI_PM_METRICS_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.c b/drivers/gpu/arm/utgard/common/mali_pmu.c
new file mode 100644
index 000000000000..6f0af59f6fd4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pmu.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu);
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
+{
+ struct mali_pmu_core *pmu;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+ sizeof(struct mali_pmu_core));
+ if (NULL != pmu) {
+ pmu->registered_cores_mask = 0; /* to be set later */
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+ resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+ pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+ mali_global_pmu_core = pmu;
+
+ return pmu;
+ }
+ _mali_osk_free(pmu);
+ }
+
+ return NULL;
+}
+
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+ mali_global_pmu_core = NULL;
+
+ mali_hw_core_delete(&pmu->hw_core);
+ _mali_osk_free(pmu);
+}
+
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
+{
+ pmu->registered_cores_mask = mask;
+}
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Setup the desired defaults */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+}
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pm_exec_lock();
+
+ mali_pmu_reset(pmu);
+
+ /* Now simply power up the domains which are marked as powered down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_up(pmu, stat);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pm_exec_lock();
+
+ /* Now simply power down the domains which are marked as powered up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
+
+ mali_pm_exec_unlock();
+}
+
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power down: ...................... [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+
+ /*
+ * Assert that we are not powering down domains which are already
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+
+ mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+
+ if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
+
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+ /*
+ * Do not wait for interrupt on Mali-300/400 if all domains are
+ * powered off by our power down command, because the HW will simply
+ * not generate an interrupt in this case.
+ */
+ if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) {
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ } else {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ }
+
+#if defined(DEBUG)
+ /* Verify power status of domains after power down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ u32 current_domain;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power up: ........................ [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+ if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+
+ /*
+ * Assert that we are only powering up domains which are currently
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+#else
+ for (current_domain = 1;
+ current_domain <= pmu->registered_cores_mask;
+ current_domain <<= 1) {
+ if (current_domain & mask & stat) {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP,
+ current_domain);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+ }
+#endif
+
+#if defined(DEBUG)
+ /* Verify power status of domains after power up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+#endif /* defined(DEBUG) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu)
+{
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+ MALI_DEBUG_ASSERT(pmu);
+
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+ MALI_DEBUG_ASSERT(0 < timeout);
+
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.h b/drivers/gpu/arm/utgard/common/mali_pmu.h
new file mode 100644
index 000000000000..5b856240fdac
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pmu.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali built-in PMU hardware
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
+
+/** @brief Mali built-in PMU hardware info; the PMU hardware tracks the power mask of the registered cores
+ */
+struct mali_pmu_core {
+ struct mali_hw_core hw_core;
+ u32 registered_cores_mask;
+ u32 switch_delay;
+};
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+#define PMU_REG_VAL_IRQ 1
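+
+/*
+ * Command protocol sketch (mirroring what mali_pmu_power_up()/_down() do
+ * with these registers): write a domain mask to POWER_UP or POWER_DOWN,
+ * poll INT_RAWSTAT until PMU_REG_VAL_IRQ is set, then acknowledge by
+ * writing PMU_REG_VAL_IRQ to INT_CLEAR.
+ */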
+
+extern struct mali_pmu_core *mali_global_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from the entry point of the driver in order to create and initialize the PMU resource.
+ *
+ * @param resource Pointer to the PMU resource descriptor
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief Deallocate the PMU resource
+ *
+ * This is called on driver exit to terminate the PMU resource.
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Set registered cores mask
+ *
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
+ */
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
+ *
+ * @param pmu Pointer to PMU core object
+ */
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+ u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ return ((~stat) & pmu->registered_cores_mask);
+}
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pp.c b/drivers/gpu/arm/utgard/common/mali_pp.c
new file mode 100644
index 000000000000..2dd8b8766f8e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+ struct mali_pp_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+ if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+ MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+ return NULL;
+ }
+
+ core = _mali_osk_calloc(1, sizeof(struct mali_pp_core));
+ if (NULL != core) {
+ core->core_id = mali_global_num_pp_cores;
+ core->bcast_id = bcast_id;
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+ _mali_osk_errcode_t ret;
+
+ if (!is_virtual) {
+ ret = mali_pp_reset(core);
+ } else {
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_pp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_pp,
+ group,
+ mali_pp_irq_probe_trigger,
+ mali_pp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ mali_global_pp_cores[mali_global_num_pp_cores] = core;
+ mali_global_num_pp_cores++;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_pp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pp_delete(struct mali_pp_core *core)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+
+ /* Remove core from global list */
+ for (i = 0; i < mali_global_num_pp_cores; i++) {
+ if (mali_global_pp_cores[i] == core) {
+ mali_global_pp_cores[i] = NULL;
+ mali_global_num_pp_cores--;
+
+ if (i != mali_global_num_pp_cores) {
+ /* We removed a PP core from the middle of the array -- move the last
+ * PP core to the current position to close the gap */
+ mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+ mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(core);
+}
+
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ /* Will only send the stop bus command, and not wait for it to complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_pp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ break;
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+ 0x0, /* Renderer List Address Register */
+ 0x0, /* Renderer State Word Base Address Register */
+ 0x0, /* Renderer Vertex Base Register */
+ 0x2, /* Feature Enable Register */
+ 0x0, /* Z Clear Value Register */
+ 0x0, /* Stencil Clear Value Register */
+ 0x0, /* ABGR Clear Value 0 Register */
+ 0x0, /* ABGR Clear Value 1 Register */
+ 0x0, /* ABGR Clear Value 2 Register */
+ 0x0, /* ABGR Clear Value 3 Register */
+ 0x0, /* Bounding Box Left Right Register */
+ 0x0, /* Bounding Box Bottom Register */
+ 0x0, /* FS Stack Address Register */
+ 0x0, /* FS Stack Size and Initial Value Register */
+ 0x0, /* Reserved */
+ 0x0, /* Reserved */
+ 0x0, /* Origin Offset X Register */
+ 0x0, /* Origin Offset Y Register */
+ 0x75, /* Subpixel Specifier Register */
+ 0x0, /* Tiebreak mode Register */
+ 0x0, /* Polygon List Format Register */
+ 0x0, /* Scaling Register */
+ 0x0 /* Tilebuffer configuration Register */
+};
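+
+/*
+ * The table above is indexed by frame register offset divided by sizeof(u32);
+ * mali_pp_job_start() below uses these defaults with the "conditional" write
+ * helpers so that registers already holding their reset value are skipped.
+ */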
+
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+ 0x0, /* WBx Source Select Register */
+ 0x0, /* WBx Target Address Register */
+ 0x0, /* WBx Target Pixel Format Register */
+ 0x0, /* WBx Target AA Format Register */
+ 0x0, /* WBx Target Layout */
+ 0x0, /* WBx Target Scanline Length */
+ 0x0, /* WBx Target Flags Register */
+ 0x0, /* WBx MRT Enable Register */
+ 0x0, /* WBx MRT Offset Register */
+ 0x0, /* WBx Global Test Enable Register */
+ 0x0, /* WBx Global Test Reference Value Register */
+ 0x0 /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+ /* Bus must be stopped before calling this function */
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+ /* Set register to a bogus value. The register will be used to detect when reset is complete */
+ mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+ /* Force core to reset */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ /* Wait for reset to be complete */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+ mali_pp_reset_async(core);
+ return mali_pp_reset_wait(core);
+}
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+ u32 relative_address;
+ u32 start_index;
+ u32 nr_of_regs;
+ u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+ u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+ u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+ u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Write frame registers */
+
+ /*
+ * There are two frame registers which are different for each sub job:
+ * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+ * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+ */
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+ /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+ if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+ }
+
+ /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+ relative_address = MALI200_REG_ADDR_RSW;
+ start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+ nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* MALI200_REG_ADDR_STACK_SIZE */
+ relative_address = MALI200_REG_ADDR_STACK_SIZE;
+ start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+ relative_address, frame_registers[start_index],
+ mali_frame_registers_reset_values[start_index]);
+
+ /* Skip 2 reserved registers */
+
+ /* Write remaining registers */
+ relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+ start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+ nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* Write WBx registers */
+ if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+ if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+ }
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+ /* Add a barrier to make sure all register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+ * Don't actually run the job if PROFILING_SKIP_PP_JOBS is set; just
+ * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+
+ /* Add a barrier to make sure the previous register writes are finished */
+ _mali_osk_write_mem_barrier();
+}
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
+{
+ if (mali_global_num_pp_cores > index) {
+ return mali_global_pp_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+ return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+}
+#endif
+
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+ int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index, val0);
+ _mali_osk_profiling_record_global_counters(counter_index, val0);
+#endif
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+ _mali_osk_profiling_record_global_counters(counter_index + 1, val1);
+#endif
+ }
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_pp.h b/drivers/gpu/arm/utgard/common/mali_pp.h
new file mode 100644
index 000000000000..f98b29866ffa
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->core_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->bcast_id;
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child are always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to check whether the counters were enabled
+ * @param child The core to actually read the values from
+ * @param job Job object to update with counter values (if enabled)
+ * @param subjob Which subjob the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
+{
+ return core->hw_core.description;
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+ MALI200_REG_VAL_IRQ_MASK_USED;
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+
+MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core)
+{
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+ struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
+}
+
+
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+ u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.c b/drivers/gpu/arm/utgard/common/mali_pp_job.c
new file mode 100644
index 000000000000..b0216d4c1ac8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp_job.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+#include "mali_memory_swap_alloc.h"
+#include "mali_scheduler.h"
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+void mali_pp_job_initialize(void)
+{
+ _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+void mali_pp_job_terminate(void)
+{
+ _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+ _mali_uk_pp_start_job_s __user *uargs, u32 id)
+{
+ struct mali_pp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+ if (NULL != job) {
+
+ _mali_osk_list_init(&job->list);
+ _mali_osk_list_init(&job->session_fb_lookup_list);
+ _mali_osk_atomic_inc(&session->number_of_pp_jobs);
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+ goto fail;
+ }
+
+ if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+ goto fail;
+ }
+
+ if (!mali_pp_job_use_no_notification(job)) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL == job->finished_notification) goto fail;
+ }
+
+ perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+ /* Case where no counters came from user space,
+ * so pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+ /* These counters apply to all virtual jobs, and to sub jobs where no per sub job counter is specified */
+ job->uargs.perf_counter_src0 = pp_counter_src0;
+ job->uargs.perf_counter_src1 = pp_counter_src1;
+
+ /* We only copy the per sub job array if it is enabled with at least one counter */
+ if (0 < sub_job_count) {
+ job->perf_counter_per_sub_job_count = sub_job_count;
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+ }
+ }
+
+ job->session = session;
+ job->id = id;
+
+ job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+ _mali_osk_atomic_init(&job->sub_job_errors, 0);
+ job->swap_status = MALI_NO_SWAP_IN;
+ job->user_notification = MALI_FALSE;
+ job->num_pp_cores_in_virtual = 0;
+
+ if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) {
+ MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n"));
+ goto fail;
+ }
+
+ if (job->uargs.num_memory_cookies > 0) {
+ u32 size;
+ u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
+
+ size = sizeof(*memory_cookies) * (job->uargs.num_memory_cookies);
+
+ job->memory_cookies = _mali_osk_malloc(size);
+ if (NULL == job->memory_cookies) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+ goto fail;
+ }
+
+ if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+ goto fail;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+ /* Not a valid job. */
+ goto fail;
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ mali_mem_swap_in_pages(job);
+
+ return job;
+ }
+
+fail:
+ if (NULL != job) {
+ mali_pp_job_delete(job);
+ }
+
+ return NULL;
+}
+
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL != job->memory_cookies) {
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Unmap buffers attached to job */
+ mali_dma_buf_unmap_job(job);
+#endif
+ if (MALI_NO_SWAP_IN != job->swap_status) {
+ mali_mem_swap_out_pages(job);
+ }
+
+ _mali_osk_free(job->memory_cookies);
+ }
+
+ if (job->user_notification) {
+ mali_scheduler_return_pp_job_to_user(job,
+ job->num_pp_cores_in_virtual);
+ }
+
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ }
+
+ _mali_osk_atomic_term(&job->sub_jobs_completed);
+ _mali_osk_atomic_term(&job->sub_job_errors);
+ _mali_osk_atomic_dec(&session->number_of_pp_jobs);
+ _mali_osk_free(job);
+
+ _mali_osk_wait_queue_wake_up(session->wait_queue);
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_pp_job *iter;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_pp_job, list) {
+ /* job should be started after iter if iter is in progress. */
+ if (0 < iter->sub_jobs_started) {
+ break;
+ }
+
+ /*
+ * job should be started after iter if it has a higher
+ * job id. A span is used to handle job id wrapping.
+ */
+ if ((mali_pp_job_get_id(job) -
+ mali_pp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
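+
+/*
+ * Illustration of the ID-span check above (example values, not from the
+ * original source): with 32-bit job IDs, a new job with id 0x00000002
+ * compared against an iter with id 0xFFFFFFFE yields an unsigned difference
+ * of 0x00000004, which is below MALI_SCHEDULER_JOB_ID_SPAN, so the new job
+ * is placed after iter even though the numeric ID has wrapped.
+ */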
+
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter (as do jobs with no per sub job counters at all) */
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src0;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+ return job->perf_counter_per_sub_job_src0[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src0;
+}
+
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter (as do jobs with no per sub job counters at all) */
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src1;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+ return job->perf_counter_per_sub_job_src1[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src1;
+}
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+ pp_counter_src0 = counter;
+}
+
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+ pp_counter_src1 = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER triggers both the increment and the decrement above, so the net change is zero */
+
+ pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER triggers both the increment and the decrement above, so the net change is zero */
+
+ pp_counter_per_sub_job_src1[sub_job] = counter;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+ return pp_counter_src0;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+ return pp_counter_src1;
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src0[sub_job];
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src1[sub_job];
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.h b/drivers/gpu/arm/utgard/common/mali_pp_job.h
new file mode 100644
index 000000000000..3d1cd13d863d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp_job.h
@@ -0,0 +1,591 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "linux/mali_dma_fence.h"
+#include <linux/fence.h>
+#endif
+
+typedef enum pp_job_status {
+ MALI_NO_SWAP_IN,
+ MALI_SWAP_IN_FAIL,
+ MALI_SWAP_IN_SUCC,
+} pp_job_status;
+
+/**
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems throughout
+ * its lifetime. Different parts of the PP job struct are used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_pp_job {
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
+ _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+ pp_job_status swap_status; /**< Tracks the swap-in status of this PP job; on failure the scheduler must drop the job */
+ mali_bool user_notification; /**< When PP job deletion is deferred, records whether a job-finished notification must still be sent to user space */
+ u32 num_pp_cores_in_virtual; /**< Number of PP cores available when the job finished */
+
+ /*
+ * These members are used by both scheduler and executor.
+ * They are "protected" by atomic operations.
+ */
+ _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (the error bits of each sub-job are OR'ed together) */
+
+ /*
+ * These members are used by the scheduler, but only while no one other
+ * than the working function knows about this job object.
+ * No lock is therefore needed for them.
+ */
+ u32 *memory_cookies; /**< Memory cookies attached to job */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct mali_dma_fence_context dma_fence_context; /**< The mali dma fence context recording the dma fence waiters this job waits for */
+ struct fence *rendered_dma_fence; /**< The new dma fence linked to this job */
+#endif
+};
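
The locking model described in the struct comment above implies that code outside this module only touches a PP job through the accessor functions declared below, while holding the lock that owns the members being accessed. A minimal illustrative sketch, not part of the patch: example_requeue_pp_job is a hypothetical helper, mali_scheduler_lock()/mali_scheduler_unlock() are assumed to be the lock helpers from mali_scheduler.h, and the accessors are the ones defined in this header.

static void example_requeue_pp_job(struct mali_pp_job *job,
                                   _mali_osk_list_t *queue)
{
        /* Scheduler-owned members (job->list, sub_jobs_started) may only be
         * touched under the scheduler lock; the accessors assert this in
         * debug builds. */
        mali_scheduler_lock();
        if (mali_pp_job_has_unstarted_sub_jobs(job))    /* reads sub_jobs_started */
                mali_pp_job_list_move(job, queue);      /* relinks job->list */
        mali_scheduler_unlock();
}
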
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.dlbu_registers;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (mali_pp_job_is_virtual(job)) {
+ return MALI_DLBU_VIRT_ADDR;
+ } else if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+ ) {
+ /* At least one output unit active */
+ return MALI_FALSE;
+ }
+
+ /* All outputs are disabled - we can abort the job */
+ return MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
+{
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+
+ MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+
+ _mali_osk_list_addtail(&job->session_fb_lookup_list,
+ &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has queued jobs: marks all
+ unstarted sub-jobs as failed so that no new sub-jobs are started. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+ u32 jobs_remaining;
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+
+ /* Not the most efficient approach, but this path is only taken in error cases */
+ for (i = 0; i < jobs_remaining; i++) {
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->sub_jobs_num ==
+ _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+ return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+ struct mali_pp_job *job, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+ return job->memory_cookies[index];
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (0 < job->uargs.num_memory_cookies) {
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+ job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ if (MALI_FALSE == success) {
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+ /*
+ * A pilot job is currently identified as a job which
+ * requires no callback notification.
+ */
+ return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+ ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_protected_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_PROTECTED)
+ ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value1[sub_job] = value;
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+
+ return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+#endif /* __MALI_PP_JOB_H__ */
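
The sub-job accessors above are enough to express the per-sub-job completion flow that the scheduler and executor code later in this patch implements. A small hedged sketch, not part of the patch; example_complete_one_sub_job is a hypothetical helper and the calls are the accessors declared in this header.

static void example_complete_one_sub_job(struct mali_pp_job *job,
                                         mali_bool success)
{
        /* Atomically count this sub-job as completed, recording any error. */
        mali_pp_job_mark_sub_job_completed(job, success);

        /* Once every sub-job has completed, the job as a whole is done and
         * its aggregated status can be reported back to user space. */
        if (mali_pp_job_is_complete(job)) {
                mali_bool job_ok = mali_pp_job_was_success(job);
                MALI_DEBUG_PRINT(3, ("job %u done, success=%u\n",
                                     mali_pp_job_get_id(job), job_ok));
        }
}
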
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.c b/drivers/gpu/arm/utgard/common/mali_scheduler.c
new file mode 100644
index 000000000000..b5e6cfddbb0e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler.c
@@ -0,0 +1,1548 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include "mali_pm_metrics.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "mali_dma_fence.h"
+#include <linux/dma-buf.h>
+#endif
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+/*
+ * ---------- static defines/constants ----------
+ */
+
+/*
+ * If dma_buf with map on demand is used, we defer PP job queueing, since
+ * we may be in atomic context and both mapping and queueing might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
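
The deferral described in the comment above follows the usual Linux pattern of pushing work that may sleep out of atomic context into a workqueue, which is what the _mali_osk_wq wrappers used further down abstract. A generic sketch of that pattern, using the plain kernel workqueue API rather than the driver's OSK layer; all example_-prefixed names are hypothetical.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static LIST_HEAD(example_deferred_jobs);
static DEFINE_SPINLOCK(example_deferred_lock);
static void example_worker(struct work_struct *work);
static DECLARE_WORK(example_work, example_worker);

/* Callable from atomic context: just park the item and kick the worker,
 * which runs in process context and is therefore allowed to sleep. */
static void example_defer(struct list_head *job_link)
{
        unsigned long flags;

        spin_lock_irqsave(&example_deferred_lock, flags);
        list_add_tail(job_link, &example_deferred_jobs);
        spin_unlock_irqrestore(&example_deferred_lock, flags);

        schedule_work(&example_work);
}

static void example_worker(struct work_struct *work)
{
        LIST_HEAD(local);
        unsigned long flags;

        /* Unhook everything under the lock, then process without it. */
        spin_lock_irqsave(&example_deferred_lock, flags);
        list_splice_init(&example_deferred_jobs, &local);
        spin_unlock_irqrestore(&example_deferred_lock, flags);

        /* ... map dma-bufs and queue jobs here; sleeping is allowed ... */
}
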
+
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
+
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+/*
+ * ---------- static variables ----------
+ */
+
+_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job);
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success);
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+void mali_scheduler_do_pp_job_delete(void *arg);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+ _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+ job_queue_gp.depth = 0;
+ job_queue_gp.big_job_num = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+ job_queue_pp.depth = 0;
+ job_queue_pp.big_job_num = 0;
+
+ mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == mali_scheduler_lock_obj) {
+ mali_scheduler_terminate();
+ }
+
+ scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_delete, NULL);
+ if (NULL == scheduler_wq_pp_job_delete) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_delete_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_queue, NULL);
+ if (NULL == scheduler_wq_pp_job_queue) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_queue_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (NULL != scheduler_pp_job_queue_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+ scheduler_pp_job_queue_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_queue) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+ scheduler_wq_pp_job_queue = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ if (NULL != scheduler_pp_job_delete_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+ scheduler_pp_job_delete_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_delete) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+ scheduler_wq_pp_job_delete = NULL;
+ }
+
+ if (NULL != mali_scheduler_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+ mali_scheduler_lock_obj = NULL;
+ }
+
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
+u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure)
+{
+ /*
+ * Count how many physical sub jobs are present from the head of queue
+ * until the first virtual job is present.
+ * Early out when we have reached maximum number of PP cores (8)
+ */
+ u32 count = 0;
+ struct mali_pp_job *job;
+ struct mali_pp_job *temp;
+
+ /* Check for partially started normal pri jobs */
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * Remember; virtual jobs can't be queued and started
+ * at the same time, so this must be a physical job
+ */
+ if ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+ || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job))) {
+
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if ((MALI_FALSE == mali_pp_job_is_virtual(job))
+ && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+ || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
+
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if ((MALI_FALSE == mali_pp_job_is_virtual(job))
+ && (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job))
+ && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+ || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
+
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+ return count;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_next(void)
+{
+ struct mali_pp_job *job;
+ struct mali_pp_job *temp;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ /* Check for partially started normal pri jobs */
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+ return job;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ return job;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ return job;
+ }
+
+ return NULL;
+}
+
+mali_bool mali_scheduler_job_next_is_virtual(void)
+{
+ struct mali_pp_job *job;
+
+ job = mali_scheduler_job_pp_virtual_peek();
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
+{
+ _mali_osk_list_t *queue;
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+ MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth);
+
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_gp_job_list_remove(job);
+ job_queue_gp.depth--;
+ if (job->big_job) {
+ job_queue_gp.big_job_num --;
+ job_queue_gp.big_job_num--;
+ /* wake up process */
+ wait_queue_head_t *queue = mali_session_get_wait_queue();
+ wake_up(queue);
+ }
+ }
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ /*
+ * For PP jobs we favour partially started jobs in normal
+ * priority queue over unstarted jobs in high priority queue
+ */
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job ||
+ MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * There isn't a partially started job in normal queue, so
+ * look in high priority queue.
+ */
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job) {
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ mali_pp_job_mark_sub_job_started(job, *sub_job);
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* Remove from queue when last sub job has been retrieved */
+ mali_pp_job_list_remove(job);
+ }
+
+ job_queue_pp.depth--;
+
+ /*
+ * The job is about to start, so it is no longer
+ * possible to discard its WB units
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(0 ==
+ mali_pp_job_get_first_unstarted_sub_job(job));
+ MALI_DEBUG_ASSERT(1 ==
+ mali_pp_job_get_sub_job_count(job));
+
+ mali_pp_job_mark_sub_job_started(job, 0);
+
+ mali_pp_job_list_remove(job);
+
+ job_queue_pp.depth--;
+
+ /*
+ * The job is about to start, so it is no longer
+ * possible to discard its WB units
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_gp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_gp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, MALI_FALSE,
+ MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+
+ return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_pp_job_get_id(job), job));
+
+ if (MALI_TRUE == mali_timeline_tracker_activation_error(
+ mali_pp_job_get_tracker(job))) {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+ mali_pp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_scheduler_deferred_pp_job_queue(job);
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_pp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+ return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_gp_job_to_user(job, success);
+ }
+
+ if (dequeued) {
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+ mali_pm_record_gpu_idle(MALI_TRUE);
+ }
+
+ mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ job->user_notification = user_notification;
+ job->num_pp_cores_in_virtual = num_cores_in_virtual;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if (NULL != job->rendered_dma_fence)
+ mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+#endif
+
+ if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+ if (mali_pp_job_is_window_surface(job)) {
+ struct mali_session_data *session;
+ session = mali_pp_job_get_session(job);
+ mali_session_inc_num_window_jobs(session);
+ }
+#endif
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_pp_end();
+ }
+ mali_pm_record_gpu_idle(MALI_FALSE);
+ }
+
+ /* With the ZRAM feature enabled, all PP jobs are forced to use deferred deletion. */
+ mali_scheduler_deferred_pp_job_delete(job);
+}
+
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_gp_job *gp_tmp;
+ struct mali_pp_job *pp_job;
+ struct mali_pp_job *pp_tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+ session));
+
+ mali_scheduler_lock();
+
+ /* Remove from GP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+ }
+ }
+
+ /* Remove from GP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+ }
+ }
+
+ /* Remove from PP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ } else {
+ mali_pp_job_list_remove(pp_job);
+ }
+ }
+ }
+ }
+
+ /* Remove from PP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ } else {
+ mali_pp_job_list_remove(pp_job);
+ }
+ }
+ }
+ }
+
+ /*
+ * Release scheduler lock so we can release trackers
+ * (which will potentially queue new jobs)
+ */
+ mali_scheduler_unlock();
+
+ /* Release and complete all (non-running) found GP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+ struct mali_gp_job, list) {
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+ mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+ _mali_osk_list_delinit(&gp_job->list);
+ mali_scheduler_complete_gp_job(gp_job,
+ MALI_FALSE, MALI_FALSE, MALI_TRUE);
+ }
+
+ /* Release and complete non-running PP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+ struct mali_pp_job, list) {
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+ _mali_osk_list_delinit(&pp_job->list);
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+ _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+ NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_gp_job(session, job);
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+ _mali_uk_pp_start_job_s *uargs)
+{
+ _mali_osk_errcode_t ret;
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+ /* Submit PP job. */
+ ret = mali_scheduler_submit_pp_job(session, job, &point);
+ job = NULL;
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+ }
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+ _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ _mali_osk_errcode_t ret;
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *point_ptr = NULL;
+ mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *) ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+ sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args,
+ mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_job = mali_gp_job_create(session, gp_args,
+ mali_scheduler_get_new_id(),
+ mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+ /* Submit GP job. */
+ mali_scheduler_submit_gp_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ ret = mali_scheduler_submit_pp_job(session, pp_job, &point);
+ pp_job = NULL;
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+ }
+
+ return ret;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_scheduler_lock();
+
+ /* Iterate over all jobs for the given frame builder id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+ &session->pp_job_fb_lookup_list[fb_lookup_id],
+ struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) !=
+ (u32) args->fb_id) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ continue;
+ }
+
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+ if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+
+ if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
+ }
+
+ if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+ disable_mask));
+ }
+
+ mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_gp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+ "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+ "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "PP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_pp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.normal_pri)
+ ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.high_pri)
+ ? "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+ return point;
+}
+
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct ww_acquire_ctx ww_actx;
+ u32 i;
+ u32 num_memory_cookies = 0;
+ struct reservation_object **reservation_object_list = NULL;
+ unsigned int num_reservation_object = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_scheduler_lock();
+ /*
+ * Add the job to the lookup list used to quickly discard
+ * writeback units of queued jobs.
+ */
+ mali_pp_job_fb_lookup_add(job);
+ mali_scheduler_unlock();
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+
+ /* Allocate reservation_object_list to hold the dma reservation objects of the dependent dma buffers */
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ if (0 < num_memory_cookies) {
+ reservation_object_list = kzalloc(sizeof(struct reservation_object *) * num_memory_cookies, GFP_KERNEL);
+ if (NULL == reservation_object_list) {
+ MALI_PRINT_ERROR(("Failed to alloc the reservation object list.\n"));
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto failed_to_alloc_reservation_object_list;
+ }
+ }
+
+ /* Add each dma reservation object to reservation_object_list */
+ for (i = 0; i < num_memory_cookies; i++) {
+ mali_mem_backend *mem_backend = NULL;
+ struct reservation_object *tmp_reservation_object = NULL;
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+ if (NULL == mem_backend) {
+ MALI_PRINT_ERROR(("Failed to find the memory backend for memory cookie[%d].\n", i));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_find_mem_backend;
+ }
+
+ if (MALI_MEM_DMA_BUF != mem_backend->type)
+ continue;
+
+ tmp_reservation_object = mem_backend->dma_buf.attachment->buf->resv;
+
+ if (NULL != tmp_reservation_object) {
+ mali_dma_fence_add_reservation_object_list(tmp_reservation_object,
+ reservation_object_list, &num_reservation_object);
+ }
+ }
+
+ /*
+ * Add mali dma fence callbacks to wait for all dependent dma-bufs,
+ * extend the timeline system to support dma fences, and then create
+ * a new internal dma fence to replace the previous fences on the dependent dma-bufs.
+ */
+ if (0 < num_reservation_object) {
+ int error;
+ int num_dma_fence_waiter = 0;
+ /* Create one new dma fence.*/
+ job->rendered_dma_fence = mali_dma_fence_new(job->session->fence_context,
+ _mali_osk_atomic_inc_return(&job->session->fence_seqno));
+
+ if (NULL == job->rendered_dma_fence) {
+ MALI_PRINT_ERROR(("Failed to create a new dma fence.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_create_dma_fence;
+ }
+
+ /* To avoid deadlock, use wait/wound mutex locking to lock all dma buffers */
+
+ error = mali_dma_fence_lock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+
+ if (0 != error) {
+ MALI_PRINT_ERROR(("Failed to lock all reservation objects.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_lock_reservation_object_list;
+ }
+
+ mali_dma_fence_context_init(&job->dma_fence_context,
+ mali_timeline_dma_fence_callback, (void *)job);
+
+ /* Add dma fence waiters and dma fence callback. */
+ for (i = 0; i < num_reservation_object; i++) {
+ ret = mali_dma_fence_context_add_waiters(&job->dma_fence_context, reservation_object_list[i]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to add waiter into mali dma fence context.\n"));
+ goto failed_to_add_dma_fence_waiter;
+ }
+ }
+
+ for (i = 0; i < num_reservation_object; i++) {
+ reservation_object_add_excl_fence(reservation_object_list[i], job->rendered_dma_fence);
+ }
+
+ num_dma_fence_waiter = job->dma_fence_context.num_dma_fence_waiter;
+
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ if (0 != num_dma_fence_waiter) {
+ mali_dma_fence_context_dec_count(&job->dma_fence_context);
+ }
+
+ /* Unlock all wait/wound mutex lock. */
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+ } else {
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+ }
+
+ kfree(reservation_object_list);
+ return ret;
+#else
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+#endif
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+failed_to_add_dma_fence_waiter:
+ mali_dma_fence_context_term(&job->dma_fence_context);
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+failed_to_lock_reservation_object_list:
+ mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+failed_to_create_dma_fence:
+failed_to_find_mem_backend:
+ if (NULL != reservation_object_list)
+ kfree(reservation_object_list);
+failed_to_alloc_reservation_object_list:
+ mali_pp_job_fb_lookup_remove(job);
+#endif
+ return ret;
+}
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_gp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ /* Determine which queue the job should be added to. */
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ }
+
+ job_queue_gp.depth += 1;
+ job_queue_gp.big_job_num += (job->big_job) ? 1 : 0;
+
+ /* Add job to queue (mali_gp_job_list_add finds the correct place). */
+ mali_gp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue, so that any runtime resume can schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the
+ * time a GP job is queued. This is fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+
+ mali_pm_record_gpu_active(MALI_TRUE);
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+ mali_gp_job_get_pid(job),
+ mali_gp_job_get_tid(job),
+ mali_gp_job_get_frame_builder_id(job),
+ mali_gp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+ mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+ mali_gp_job_get_id(job), job));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue = NULL;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE;
+ }
+
+ mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_pp.high_pri;
+ } else {
+ queue = &job_queue_pp.normal_pri;
+ }
+
+ job_queue_pp.depth +=
+ mali_pp_job_get_sub_job_count(job);
+
+ /* Add job to queue (mali_pp_job_list_add finds the correct place). */
+ mali_pp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue, so that any runtime resume can schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the
+ * time a PP job is queued. This is fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+
+ mali_pm_record_gpu_active(MALI_FALSE);
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+ mali_pp_job_get_pid(job),
+ mali_pp_job_get_tid(job),
+ mali_pp_job_get_frame_builder_id(job),
+ mali_pp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+ mali_pp_job_get_id(job), "PP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual(job)
+ ? "Virtual" : "Physical",
+ mali_pp_job_get_id(job), job,
+ mali_pp_job_get_sub_job_count(job)));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_gp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count();
+
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(session, notification);
+}
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual)
+{
+ u32 i;
+ u32 num_counters_to_copy;
+ _mali_uk_pp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+ return;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_pp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == mali_pp_job_was_success(job)) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_virtual(job)) {
+ num_counters_to_copy = num_cores_in_virtual;
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+ for (i = 0; i < num_counters_to_copy; i++) {
+ jobres->perf_counter0[i] =
+ mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] =
+ mali_pp_job_get_perf_counter_value1(job, i);
+ jobres->perf_counter_src0 =
+ mali_pp_job_get_pp_counter_global_src0();
+ jobres->perf_counter_src1 =
+ mali_pp_job_get_pp_counter_global_src1();
+ }
+
+ mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+void mali_scheduler_do_pp_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release
+ * the lock before we start deleting the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ _mali_osk_list_delinit(&job->list);
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ mali_dma_fence_context_term(&job->dma_fence_context);
+#endif
+
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release
+ * the lock before we start queueing the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ /* First loop through all jobs and do the pre-work (no locks needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ /*
+ * This operation could fail, but we continue anyway,
+ * because the worst that could happen is that this
+ * job will fail due to a Mali page fault.
+ */
+ mali_dma_buf_map_job(job);
+ }
+ }
+
+ mali_scheduler_lock();
+
+ /* Then loop through all jobs again to queue them (lock needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ /* Remove from scheduler_pp_job_queue_list before queueing */
+ mali_pp_job_list_remove(job);
+
+ if (mali_scheduler_queue_pp_job(job)) {
+ /* Job queued successfully */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ } else {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+
+ /* unlock scheduler in this uncommon case */
+ mali_scheduler_unlock();
+
+ schedule_mask |= mali_timeline_tracker_release(
+ mali_pp_job_get_tracker(job));
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+ MALI_FALSE);
+
+ mali_scheduler_lock();
+ }
+ }
+
+ mali_scheduler_unlock();
+
+ /* Trigger scheduling of jobs */
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+void mali_scheduler_gp_pp_job_queue_print(void)
+{
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_gp_job *tmp_gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ struct mali_pp_job *tmp_pp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+ /* Dump the job queue status */
+ if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
+ MALI_PRINT(("No GP&PP job in the job queue.\n"));
+ return;
+ }
+
+ MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth));
+ if (job_queue_gp.depth > 0) {
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+ }
+ }
+
+ if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+ }
+ }
+ }
+
+ MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth));
+ if (job_queue_pp.depth > 0) {
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_is_virtual(pp_job)) {
+ MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ } else {
+ MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ }
+ }
+ }
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_is_virtual(pp_job)) {
+ MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ } else {
+ MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ }
+ }
+ }
+ }
+
+ /* dump group running job status */
+ mali_executor_running_status_print();
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.h b/drivers/gpu/arm/utgard/common/mali_scheduler.h
new file mode 100644
index 000000000000..de81a421ea9a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler_types.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+ _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */
+ u32 depth; /* Depth of combined queues. */
+ u32 big_job_num;
+};
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
+
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+ _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
+{
+ return job_queue_gp.depth;
+}
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void)
+{
+ return job_queue_gp.big_job_num;
+}
+
+u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+struct mali_pp_job *mali_scheduler_job_pp_next(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+}
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
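+
+/*
+ * Illustrative usage sketch for the two activate functions above; the caller
+ * shown here is hypothetical, but follows the @note above by only triggering
+ * the executor when the returned bitmask is non-empty:
+ *
+ *     mali_scheduler_mask mask = mali_scheduler_activate_pp_job(job);
+ *
+ *     if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ *             mali_executor_schedule_from_mask(mask, MALI_FALSE);
+ *     }
+ */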
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_abort_session(struct mali_session_data *session);
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual);
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
+
+void mali_scheduler_gp_pp_job_queue_print(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler_types.h b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h
new file mode 100644
index 000000000000..ba1d71d01d46
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_session.c b/drivers/gpu/arm/utgard/common/mali_session.c
new file mode 100644
index 000000000000..7504fb108779
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_session.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+#include "mali_ukk.h"
+#ifdef MALI_MEM_SWAP_TRACKING
+#include "mali_memory_swap_alloc.h"
+#endif
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
+wait_queue_head_t pending_queue;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+ /* Init the wait queue used for big varying jobs */
+ init_waitqueue_head(&pending_queue);
+
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SESSIONS);
+ if (NULL == mali_sessions_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ if (NULL != mali_sessions_lock) {
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ mali_sessions_lock = NULL;
+ }
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_count++;
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_count--;
+ mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+ return mali_session_count;
+}
+
+mali_bool mali_session_pp_job_is_empty(void *data)
+{
+ struct mali_session_data *session = (struct mali_session_data *)data;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 == _mali_osk_atomic_read(&session->number_of_pp_jobs)) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+wait_queue_head_t *mali_session_get_wait_queue(void)
+{
+ return &pending_queue;
+}
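+
+/*
+ * Illustrative sketch (hypothetical caller) of how the predicate and wait
+ * queue above are intended to be combined, e.g. to block until all PP jobs
+ * of a session have drained:
+ *
+ *     wait_event(*mali_session_get_wait_queue(),
+ *                mali_session_pp_job_is_empty(session));
+ */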
+
+/*
+ * Get the max number of completed window jobs from all active sessions;
+ * this is used in the window-render frames-per-second calculation.
+ */
+#if defined(CONFIG_MALI_DVFS)
+u32 mali_session_max_window_num(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 max_window_num = 0;
+ u32 tmp_number = 0;
+
+ mali_session_lock();
+
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ tmp_number = _mali_osk_atomic_xchg(
+ &session->number_of_window_jobs, 0);
+ if (max_window_num < tmp_number) {
+ max_window_num = tmp_number;
+ }
+ }
+
+ mali_session_unlock();
+
+ return max_window_num;
+}
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+ struct mali_session_data *session, *tmp;
+ u32 mali_mem_usage;
+ u32 total_mali_mem_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+ u32 swap_pool_size;
+ u32 swap_unlock_size;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(print_ctx);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+#ifdef MALI_MEM_SWAP_TRACKING
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u %-10u\n",
+ session->comm, session->pid,
+ (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE,
+ (unsigned int)session->max_mali_mem_allocated_size,
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE)
+ );
+#else
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u \n",
+ session->comm, session->pid,
+ (unsigned int)((atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)session->max_mali_mem_allocated_size,
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE)
+ );
+#endif
+ }
+ mali_session_unlock();
+ mali_mem_usage = _mali_ukk_report_memory_usage();
+ total_mali_mem_size = _mali_ukk_report_total_memory_size();
+ _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+#ifdef MALI_MEM_SWAP_TRACKING
+ mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size);
+ _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", swap_pool_size, swap_unlock_size);
+#endif
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_session.h b/drivers/gpu/arm/utgard/common/mali_session.h
new file mode 100644
index 000000000000..da8b9927ee60
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_session.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_memory_types.h"
+#include "mali_memory_manager.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+/* Max number of pending big jobs allowed in the kernel */
+#define MALI_MAX_PENDING_BIG_JOB (2)
+
+struct mali_session_data {
+ _mali_osk_notification_queue_t *ioctl_queue;
+
+ _mali_osk_wait_queue_t *wait_queue; /**< Wait queue used to wait for this session's number of PP jobs to become 0. */
+
+ _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+ _mali_osk_mutex_t *cow_lock; /**< Lock protecting COW memory free operations */
+#if 0
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+#endif
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+ _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+ _mali_osk_atomic_t number_of_pp_jobs; /**< Number of PP jobs on this session */
+
+ _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
+ struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+ struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+ mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+ mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+ u32 pid;
+ char *comm;
+ atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */
+ atomic_t mali_mem_allocated_pages; /**< The currently allocated Mali memory pages, including Mali OS memory and Mali dedicated memory. */
+ size_t max_mali_mem_allocated_size; /**< The past maximum of allocated Mali memory size, including Mali OS memory and Mali dedicated memory. */
+ /* Added for the new memory system */
+ struct mali_allocation_manager allocation_mgr;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ u32 fence_context; /**< The dma fence context the fences of this session are run on. */
+ _mali_osk_atomic_t fence_seqno; /**< A linearly increasing sequence number for this dma fence context. */
+#endif
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+mali_bool mali_session_pp_job_is_empty(void *data);
+wait_queue_head_t *mali_session_get_wait_queue(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
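+
+/*
+ * Illustrative sketch of iterating the session list; the list must only be
+ * traversed with the sessions lock held (see mali_session_max_window_num()
+ * for an in-tree user). The counting shown here is hypothetical:
+ *
+ *     struct mali_session_data *session, *tmp;
+ *     u32 n = 0;
+ *
+ *     mali_session_lock();
+ *     MALI_SESSION_FOREACH(session, tmp, link) {
+ *             n++;
+ *     }
+ *     mali_session_unlock();
+ */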
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_signal(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
+/*
+ * Get the max number of completed window jobs from all active sessions;
+ * this is used in the window-render frames-per-second calculation.
+ */
+u32 mali_session_max_window_num(void);
+
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.c b/drivers/gpu/arm/utgard/common/mali_soft_job.c
new file mode 100644
index 000000000000..35cd830bc83a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_soft_job.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ _mali_osk_spinlock_irq_lock(system->lock);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+ MALI_DEBUG_ASSERT(0 == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = 0);
+ _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->session = session;
+
+ system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == system->lock) {
+ mali_soft_job_system_destroy(system);
+ return NULL;
+ }
+ system->lock_owner = 0;
+ system->last_job_id = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+ return system;
+}
+
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ /* All jobs should be free at this point. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
+
+ if (NULL != system) {
+ if (NULL != system->lock) {
+ _mali_osk_spinlock_irq_term(system->lock);
+ }
+ _mali_osk_free(system);
+ }
+}
+
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+ MALI_DEBUG_ASSERT(system == job->system);
+
+ _mali_osk_list_del(&(job->system_list));
+
+ mali_soft_job_system_unlock(job->system);
+
+ _mali_osk_free(job);
+}
+
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ if (job->id == job_id)
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+ if (NULL != job) {
+ if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+ _mali_osk_atomic_term(&job->refcount);
+
+ if (NULL != job->activated_notification) {
+ _mali_osk_notification_delete(job->activated_notification);
+ job->activated_notification = NULL;
+ }
+
+ mali_soft_job_system_free_job(job->system, job);
+ }
+}
+
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
+{
+ struct mali_soft_job *job;
+ _mali_osk_notification_t *notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+ (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+ if (unlikely(NULL == notification)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+ return NULL;
+ }
+
+ job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+ if (unlikely(NULL == job)) {
+ MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed.\n"));
+ /* Do not leak the pre-allocated notification on failure. */
+ _mali_osk_notification_delete(notification);
+ return NULL;
+ }
+
+ mali_soft_job_system_lock(system);
+
+ job->system = system;
+ job->id = system->last_job_id++;
+ job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+ _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
+ job->type = type;
+ job->user_job = user_job;
+ job->activated = MALI_FALSE;
+
+ job->activated_notification = notification;
+
+ _mali_osk_atomic_init(&job->refcount, 1);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ MALI_DEBUG_ASSERT(system == job->system);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+ mali_soft_job_system_unlock(system);
+
+ return job;
+}
+
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+ mali_timeline_point point;
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ system = job->system;
+
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+ mali_soft_job_system_lock(system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+ point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+ return point;
+}
+
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+ struct mali_soft_job *job;
+
+ job = (struct mali_soft_job *) data;
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return job->activated;
+}
+
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job;
+ struct mali_timeline_system *timeline_system;
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(system);
+
+ job = mali_soft_job_system_lookup_job(system, job_id);
+
+ if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+ || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+ mali_soft_job_system_unlock(system);
+ MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ /* Since the job now is in signaled state, timeouts from the timeline system will be
+ * ignored, and it is not possible to signal this job again. */
+
+ timeline_system = system->session->timeline_system;
+ MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+ /* Wait until activated. */
+ _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+ if (NULL != job->activated_notification) {
+ _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+ res->user_job = job->user_job;
+ mali_session_send_notification(job->system->session, job->activated_notification);
+ }
+ job->activated_notification = NULL;
+}
+
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+ mali_soft_job_system_unlock(job->system);
+
+ /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_soft_job_destroy(job);
+ return schedule_mask;
+ }
+
+ /* Send activated notification. */
+ mali_soft_job_send_activated_notification(job);
+
+ /* Wake up sleeping signaler. */
+ job->activated = MALI_TRUE;
+
+ /* If the job type is self-signaled: release the tracker, destroy the soft job and schedule at once */
+ if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+ } else {
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+ }
+
+ return schedule_mask;
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ /* The session is aborting. This job will be released and destroyed by @ref
+ * mali_soft_job_system_abort(). */
+ mali_soft_job_system_unlock(job->system);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ /* The job is about to be signaled, ignore timeout. */
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+ mali_soft_job_system_unlock(job->system);
+ return schedule_mask;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+ _mali_osk_atomic_inc(&job->refcount);
+
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+
+ return schedule_mask;
+}
+
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+ struct mali_soft_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+ mali_soft_job_system_lock(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+ /* If the job has been activated, we have to release the tracker and destroy
+ * the job. If not, the tracker will be released and the job destroyed when
+ * it is activated. */
+ if (MALI_TRUE == job->activated) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+ /* We need to destroy this soft job. */
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ }
+
+ mali_soft_job_system_unlock(system);
+
+ /* Release and destroy jobs. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ /* Move job back to used list before destroying. */
+ _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+ mali_soft_job_destroy(job);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.h b/drivers/gpu/arm/utgard/common/mali_soft_job.h
new file mode 100644
index 000000000000..018ef4c527d9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_soft_job.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if they
+ * are either signaled by user-space (@ref mali_soft_job_system_signal_job) or timed out by the
+ * Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release their job resources automatically
+ * in the kernel when the job is activated.
+ */
+typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
+ MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * When a soft job is created (@ref mali_soft_job_create) it is allocated and its state is set to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is started (@ref mali_soft_job_start) and added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ */
+typedef enum mali_soft_job_state {
+ MALI_SOFT_JOB_STATE_ALLOCATED,
+ MALI_SOFT_JOB_STATE_STARTED,
+ MALI_SOFT_JOB_STATE_SIGNALED,
+ MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/**
+ * Soft job struct.
+ *
+ * Soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+ mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
+ u64 user_job; /**< Identifier for soft job in user space. */
+ _mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
+ mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+ _mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+ /* Protected by soft job system lock. */
+ u32 id; /**< Used by user-space to find corresponding soft job in kernel-space. */
+ mali_soft_job_state state; /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+ struct mali_soft_job_system *system; /**< The soft job system this job is in. */
+ _mali_osk_list_t system_list; /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+ struct mali_session_data *session; /**< The session this soft job system belongs to. */
+ _MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
+
+ _mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
+ u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+ u32 last_job_id; /**< Records the last job id; protected by the system lock. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job must not have any started or activated jobs. Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
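+
+/*
+ * Illustrative life-cycle sketch for a user-signaled soft job; the fence
+ * setup and error handling are hypothetical and kept minimal:
+ *
+ *     struct mali_soft_job *job;
+ *     u32 id;
+ *
+ *     job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_USER_SIGNALED, user_job);
+ *     id = job->id;
+ *     mali_soft_job_start(job, &fence);
+ *     ... later, when user-space reports completion for this id ...
+ *     mali_soft_job_system_signal_job(system, id);
+ */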
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
new file mode 100644
index 000000000000..f829e99f02ab
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+ struct mali_spinlock_reentrant *spinlock;
+
+ spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+ if (NULL == spinlock) {
+ return NULL;
+ }
+
+ spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+ if (NULL == spinlock->lock) {
+ mali_spinlock_reentrant_term(spinlock);
+ return NULL;
+ }
+
+ return spinlock;
+}
+
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+ if (NULL != spinlock->lock) {
+ _mali_osk_spinlock_irq_term(spinlock->lock);
+ }
+
+ _mali_osk_free(spinlock);
+}
+
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid);
+
+ MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+ if (tid != spinlock->owner) {
+ _mali_osk_spinlock_irq_lock(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+ spinlock->owner = tid;
+ }
+
+ MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+ ++spinlock->counter;
+}
+
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+ --spinlock->counter;
+ if (0 == spinlock->counter) {
+ spinlock->owner = 0;
+ MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+ _mali_osk_spinlock_irq_unlock(spinlock->lock);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h
new file mode 100644
index 000000000000..4d788ec1bbe4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+ _mali_osk_spinlock_irq_t *lock;
+ u32 owner;
+ u32 counter;
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ return (tid == spinlock->owner && 0 < spinlock->counter);
+}
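+
+/*
+ * Illustrative sketch of the reentrant locking pattern: the same thread may
+ * nest wait/signal pairs, and the underlying spinlock is only released when
+ * the outermost signal is issued:
+ *
+ *     u32 tid = _mali_osk_get_tid();
+ *
+ *     mali_spinlock_reentrant_wait(spinlock, tid);
+ *     mali_spinlock_reentrant_wait(spinlock, tid);     (nested call, does not deadlock)
+ *     MALI_DEBUG_ASSERT(mali_spinlock_reentrant_is_held(spinlock, tid));
+ *     mali_spinlock_reentrant_signal(spinlock, tid);   (still held)
+ *     mali_spinlock_reentrant_signal(spinlock, tid);   (released here)
+ */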
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.c b/drivers/gpu/arm/utgard/common/mali_timeline.c
new file mode 100644
index 000000000000..63282c3f3b67
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline.c
@@ -0,0 +1,1816 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/file.h>
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+/*
+ * The following three counters record how many GP, physical PP
+ * and virtual PP jobs are delayed in the whole timeline system;
+ * they are used to decide whether an idle group needs to be
+ * deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
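+
+/*
+ * Illustrative sketch (hypothetical check, not taken from this driver) of how
+ * such counters can be consulted, e.g. before deactivating an idle group:
+ *
+ *     if (0 == _mali_osk_atomic_read(&gp_tracker_count) &&
+ *         0 == _mali_osk_atomic_read(&phy_pp_tracker_count) &&
+ *         0 == _mali_osk_atomic_read(&virt_pp_tracker_count)) {
+ *             ... no jobs are currently delayed in the timeline system ...
+ *     }
+ */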
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+struct mali_deferred_fence_put_entry {
+ struct hlist_node list;
+ struct sync_fence *fence;
+};
+
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
+
+static void put_sync_fences(struct work_struct *ignore)
+{
+ struct hlist_head list;
+ struct hlist_node *tmp, *pos;
+ unsigned long flags;
+ struct mali_deferred_fence_put_entry *o;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
+ sync_fence_put(o->fence);
+ kfree(o);
+ }
+}
+
+static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+/* Callback that is called when a sync fence a tracker is waiting on is signaled. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+#else
+static void mali_timeline_sync_fence_callback(struct mali_internal_sync_fence *sync_fence, struct mali_internal_sync_fence_waiter *sync_fence_waiter)
+#endif
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ int fence_status = sync_fence->status;
+#else
+ int fence_status = atomic_read(&sync_fence->status);
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+ tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ is_aborting = system->session->is_aborting;
+ if (!is_aborting && (0 > fence_status)) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ }
+
+ waiter = tracker->waiter_sync;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ tracker->sync_fence = NULL;
+ tracker->fence.sync_fd = -1;
+
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /*
+ * Older versions of Linux, before 3.5, don't support fput() in interrupt
+ * context. For those older kernels, allocate a list entry, put the
+ * fence object on it, and defer the call to sync_fence_put() to a workqueue.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+ {
+ struct mali_deferred_fence_put_entry *obj;
+
+ obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+ if (obj) {
+ unsigned long flags;
+ mali_bool schedule = MALI_FALSE;
+
+ obj->fence = sync_fence;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+ schedule = MALI_TRUE;
+ hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ if (schedule)
+ schedule_delayed_work(&delayed_sync_fence_put, 0);
+ }
+ }
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_put(sync_fence);
+#else
+ fput(sync_fence->file);
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+ if (!is_aborting) {
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+ return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
+static void mali_timeline_timer_callback(void *data)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker;
+ struct mali_timeline *timeline;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ timeline = (struct mali_timeline *) data;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ if (!system->timer_enabled) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ tracker = timeline->tracker_tail;
+ timeline->timer_active = MALI_FALSE;
+
+ if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+ /* This is likely delayed work that was already scheduled before it could be cancelled. */
+ if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ schedule_mask = mali_timeline_tracker_time_out(tracker);
+ tracker->timer_active = MALI_FALSE;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ system->timer_enabled = MALI_FALSE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+ }
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ if (NULL != timeline) {
+ /* Assert that the timeline object has been properly cleaned up before destroying it. */
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != timeline->system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+ }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (NULL != timeline->sync_tl) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_timeline_destroy(timeline->sync_tl);
+#else
+ mali_internal_sync_timeline_destroy(timeline->sync_tl);
+#endif
+ }
+#else
+ _mali_osk_free(timeline);
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+ }
+}
+
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+ timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+ if (NULL == timeline) {
+ return NULL;
+ }
+
+ /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+ /* Start the timeline a bit before wrapping when debugging. */
+ timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+ timeline->point_next = 1;
+#endif
+ timeline->point_oldest = timeline->point_next;
+
+ /* The tracker and waiter lists will initially be empty. */
+
+ timeline->system = system;
+ timeline->id = id;
+
+ timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+ if (NULL == timeline->delayed_work) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ {
+ char timeline_name[32];
+
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_PP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_SOFT:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->destroyed = MALI_FALSE;
+
+ timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
+ if (NULL == timeline->sync_tl) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+ if (NULL == timeline->spinlock) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ return timeline;
+}
+
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ if (mali_timeline_is_full(timeline)) {
+ /* Don't add tracker if timeline is full. */
+ tracker->point = MALI_TIMELINE_NO_POINT;
+ return;
+ }
+
+ tracker->timeline = timeline;
+ tracker->point = timeline->point_next;
+
+ /* Find next available point. */
+ timeline->point_next++;
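+ /* Point value 0 is reserved as MALI_TIMELINE_NO_POINT, so skip it if the counter wraps. */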
+ if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+ timeline->point_next++;
+ }
+
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+ if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+ _mali_osk_atomic_inc(&gp_tracker_count);
+ } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_inc(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_inc(&phy_pp_tracker_count);
+ }
+ }
+
+ /* Add tracker as new head on timeline's tracker list. */
+ if (NULL == timeline->tracker_head) {
+ /* Tracker list is empty. */
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+ timeline->tracker_tail = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+ } else {
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+ tracker->timeline_prev = timeline->tracker_head;
+ timeline->tracker_head->timeline_next = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ }
+ timeline->tracker_head = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
+
+/* Insert the waiter object into the given timeline's sorted list of waiters. */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+ struct mali_timeline_waiter *waiter_prev;
+ struct mali_timeline_waiter *waiter_next;
+
+ /* The waiter's point must lie between the timeline's tail and head, and there
+ * must be fewer than MALI_TIMELINE_MAX_POINT_SPAN points between them. */
+ MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+ MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+ /* Find where to put this waiter in the timeline's linked list of waiters. */
+ waiter_prev = timeline->waiter_head; /* Insert new after waiter_prev */
+ waiter_next = NULL; /* Insert new before waiter_next */
+
+ /* Iterating backwards from head (newest) to tail (oldest) until we
+ * find the correct spot to insert the new waiter */
+ while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+ waiter_next = waiter_prev;
+ waiter_prev = waiter_prev->timeline_prev;
+ }
+
+ if (NULL == waiter_prev && NULL == waiter_next) {
+ /* list is empty */
+ timeline->waiter_head = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else if (NULL == waiter_next) {
+ /* insert at head */
+ waiter_new->timeline_prev = timeline->waiter_head;
+ timeline->waiter_head->timeline_next = waiter_new;
+ timeline->waiter_head = waiter_new;
+ } else if (NULL == waiter_prev) {
+ /* insert at tail */
+ waiter_new->timeline_next = timeline->waiter_tail;
+ timeline->waiter_tail->timeline_prev = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else {
+ /* insert between */
+ waiter_new->timeline_next = waiter_next;
+ waiter_new->timeline_prev = waiter_prev;
+ waiter_next->timeline_prev = waiter_new;
+ waiter_prev->timeline_next = waiter_new;
+ }
+}
+
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *oldest_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Timer is disabled, early out. */
+ if (!system->timer_enabled) return;
+
+ oldest_tracker = timeline->tracker_tail;
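+ /* Only the oldest tracker that is fully triggered (trigger_ref_count == 0) keeps the
+  * timeout timer armed; otherwise any pending timer is cancelled. */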
+ if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+ if (MALI_FALSE == oldest_tracker->timer_active) {
+ if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ }
+ _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+ oldest_tracker->timer_active = MALI_TRUE;
+ timeline->timer_active = MALI_TRUE;
+ }
+ } else if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ MALI_DEBUG_CODE({
+ struct mali_timeline_system *system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ });
+
+ if (NULL != timeline->tracker_tail) {
+ /* Set oldest point to oldest tracker's point */
+ timeline->point_oldest = timeline->tracker_tail->point;
+ } else {
+ /* No trackers, mark point list as empty */
+ timeline->point_oldest = timeline->point_next;
+ }
+
+ /* Release all waiters that are no longer on the timeline's point list.
+ * Releasing a waiter can cause this function to be called again, so we
+ * do not keep any pointers on the stack. */
+ while (NULL != timeline->waiter_tail) {
+ u32 waiter_time_relative;
+ u32 time_head_relative;
+ struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+ time_head_relative = timeline->point_next - timeline->point_oldest;
+ waiter_time_relative = waiter->point - timeline->point_oldest;
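+ /* Comparing distances relative to point_oldest keeps this check correct even if
+  * the u32 point counter has wrapped around. */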
+
+ if (waiter_time_relative < time_head_relative) {
+ /* This and all following waiters are on the point list, so we are done. */
+ break;
+ }
+
+ /* Remove waiter from timeline's waiter list. */
+ if (NULL != waiter->timeline_next) {
+ waiter->timeline_next->timeline_prev = NULL;
+ } else {
+ /* This was the last waiter */
+ timeline->waiter_head = NULL;
+ }
+ timeline->waiter_tail = waiter->timeline_next;
+
+ /* Release waiter. This could activate a tracker, if this was
+ * the last waiter for the tracker. */
+ schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+ /* Zero out all tracker members. */
+ _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+ tracker->type = type;
+ tracker->job = job;
+ tracker->trigger_ref_count = 1; /* Prevents any callback from triggering while the tracker is being added */
+ tracker->os_tick_create = _mali_osk_time_tickcount();
+ MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+ tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+ /* Copy fence. */
+ if (NULL != fence) {
+ _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+ }
+}
+
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker_next, *tracker_prev;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ /* Upon entry a group lock will be held, but not a scheduler lock. */
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ /* Tracker should have been triggered */
+ MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+ /* All waiters should have been released at this point */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+ timeline = tracker->timeline;
+ if (NULL == timeline) {
+ /* Tracker was not on a timeline, there is nothing to release. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Tracker should still be on timeline */
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
+
+ /* Tracker is no longer valid. */
+ MALI_DEBUG_CODE(tracker->magic = 0);
+
+ tracker_next = tracker->timeline_next;
+ tracker_prev = tracker->timeline_prev;
+ tracker->timeline_next = NULL;
+ tracker->timeline_prev = NULL;
+
+ /* Removing tracker from timeline's tracker list */
+ if (NULL == tracker_next) {
+ /* This tracker was the head */
+ timeline->tracker_head = tracker_prev;
+ } else {
+ tracker_next->timeline_prev = tracker_prev;
+ }
+
+ if (NULL == tracker_prev) {
+ /* This tracker was the tail */
+ timeline->tracker_tail = tracker_next;
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ /* Update the timeline's oldest time and release any waiters */
+ schedule_mask |= mali_timeline_update_oldest_point(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ } else {
+ tracker_prev->timeline_next = tracker_next;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Update delayed work only when it is the soft job timeline */
+ if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+ mali_timeline_update_delayed_work(tracker->timeline);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *tail,
+ struct mali_timeline_waiter *head)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(head);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
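+ /* Splice the released waiter list onto the front of the system's empty list:
+  * head links to the old front of the empty list and tail becomes the new front. */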
+ head->tracker_next = system->waiter_empty_list;
+ system->waiter_empty_list = tail;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_timeline_system *system;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+ if (NULL != tracker->waiter_head) {
+ mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+ tracker->waiter_head = NULL;
+ tracker->waiter_tail = NULL;
+ }
+
+ switch (tracker->type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+ _mali_osk_atomic_dec(&gp_tracker_count);
+ break;
+ case MALI_TIMELINE_TRACKER_PP:
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_dec(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_dec(&phy_pp_tracker_count);
+ }
+ schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SOFT:
+ timeline = tracker->timeline;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ schedule_mask |= mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+ /* Start a timer to make sure the soft job is released within a limited time */
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_update_delayed_work(timeline);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ break;
+ case MALI_TIMELINE_TRACKER_WAIT:
+ mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+ MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+ break;
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count++;
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count--;
+
+ tracker->activation_error |= activation_error;
+
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ fence->points[i] = uk_fence->points[i];
+ }
+
+ fence->sync_fd = uk_fence->sync_fd;
+}
+
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+ u32 i;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+ system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+ if (NULL == system->spinlock) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+ if (NULL == system->timelines[i]) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+ }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
+ if (NULL == system->signaled_sync_tl) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ system->waiter_empty_list = NULL;
+ system->session = session;
+ system->timer_enabled = MALI_TRUE;
+
+ system->wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == system->wait_queue) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ return system;
+}
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/**
+ * Check if there are any trackers left on the timeline.
+ *
+ * Used as a wait queue conditional.
+ *
+ * @param data Timeline.
+ * @return MALI_TRUE if there are no trackers on the timeline, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+ struct mali_timeline *timeline = (struct mali_timeline *) data;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ return mali_timeline_is_empty(timeline);
+}
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_tracker *tracker, *tracker_next;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Cancel sync fence waiters. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker_next = timeline->tracker_tail;
+ while (NULL != tracker_next) {
+ tracker = tracker_next;
+ tracker_next = tracker->timeline_next;
+
+ if (NULL == tracker->sync_fence) continue;
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+ /* Cancel sync fence waiter. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+#else
+ if (0 == mali_internal_sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+#endif
+ /* Callback was not called, move tracker to local list. */
+ _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+ }
+ }
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+ _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+ mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+ }
+
+ /* Sleep until all sync fence callbacks are done and all timelines are empty. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+ }
+}
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+static void mali_timeline_cancel_dma_fence_waiters(struct mali_timeline_system *system)
+{
+ u32 i, j;
+ u32 tid = _mali_osk_get_tid();
+ struct mali_pp_job *pp_job = NULL;
+ struct mali_pp_job *next_pp_job = NULL;
+ struct mali_timeline *timeline = NULL;
+ struct mali_timeline_tracker *tracker, *tracker_next;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_job_list);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Cancel dma fence waiters. */
+ timeline = system->timelines[MALI_TIMELINE_PP];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker_next = timeline->tracker_tail;
+ while (NULL != tracker_next) {
+ mali_bool fence_is_signaled = MALI_TRUE;
+ tracker = tracker_next;
+ tracker_next = tracker->timeline_next;
+
+ if (NULL == tracker->waiter_dma_fence) continue;
+ pp_job = (struct mali_pp_job *)tracker->job;
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling dma fence waiter for tracker 0x%08X.\n", tracker));
+
+ for (j = 0; j < pp_job->dma_fence_context.num_dma_fence_waiter; j++) {
+ if (pp_job->dma_fence_context.mali_dma_fence_waiters[j]) {
+ /* Cancel a callback that was previously registered on the fence.
+ * fence_remove_callback() returns true if the callback was successfully
+ * removed, or false if the fence has already been signaled.
+ */
+ bool ret = fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+ &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+ if (ret) {
+ fence_is_signaled = MALI_FALSE;
+ }
+ }
+ }
+
+ /* Callbacks were not called, move pp job to local list. */
+ if (MALI_FALSE == fence_is_signaled)
+ _mali_osk_list_add(&pp_job->list, &pp_job_list);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /* Manually call dma fence callback in order to release waiter and trigger activation of tracker. */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, next_pp_job, &pp_job_list, struct mali_pp_job, list) {
+ mali_timeline_dma_fence_callback((void *)pp_job);
+ }
+
+ /* Sleep until all dma fence callbacks are done and all timelines are empty. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+ }
+}
+#endif
+#endif
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+ MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ mali_timeline_cancel_dma_fence_waiters(system);
+#endif
+
+ /* Should not be any waiters or trackers left at this point. */
+ MALI_DEBUG_CODE({
+ u32 i;
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+ {
+ struct mali_timeline *timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ }
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ });
+}
+
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+ u32 i;
+ struct mali_timeline_waiter *waiter, *next;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ u32 tid = _mali_osk_get_tid();
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+ if (NULL != system) {
+
+ /* There should be no waiters left on this queue. */
+ if (NULL != system->wait_queue) {
+ _mali_osk_wait_queue_term(system->wait_queue);
+ system->wait_queue = NULL;
+ }
+
+ /* Free all waiters in empty list */
+ waiter = system->waiter_empty_list;
+ while (NULL != waiter) {
+ next = waiter->tracker_next;
+ _mali_osk_free(waiter);
+ waiter = next;
+ }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (NULL != system->signaled_sync_tl) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_timeline_destroy(system->signaled_sync_tl);
+#else
+ mali_internal_sync_timeline_destroy(system->signaled_sync_tl);
+#endif
+ }
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) {
+ mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid);
+ system->timelines[i]->destroyed = MALI_TRUE;
+ mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid);
+ }
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (NULL != system->timelines[i]) {
+ mali_timeline_destroy(system->timelines[i]);
+ }
+ }
+
+ if (NULL != system->spinlock) {
+ mali_spinlock_reentrant_term(system->spinlock);
+ }
+
+ _mali_osk_free(system);
+ }
+}
+
+/**
+ * Find how many waiters are needed for a given fence.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+ u32 i, num_waiters = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (MALI_TIMELINE_NO_POINT != fence->points[i]) {
+ ++num_waiters;
+ }
+ }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (-1 != fence->sync_fd) ++num_waiters;
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ return num_waiters;
+}
+
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ waiter = system->waiter_empty_list;
+ if (NULL != waiter) {
+ /* Remove waiter from empty list and zero it */
+ system->waiter_empty_list = waiter->tracker_next;
+ _mali_osk_memset(waiter, 0, sizeof(*waiter));
+ }
+
+ /* Return NULL if list was empty. */
+ return waiter;
+}
+
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+ struct mali_timeline_waiter **tail,
+ struct mali_timeline_waiter **head,
+ int max_num_waiters)
+{
+ u32 i, tid = _mali_osk_get_tid();
+ mali_bool do_alloc;
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT_POINTER(head);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ *head = *tail = NULL;
+ do_alloc = MALI_FALSE;
+ i = 0;
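+ /* Reuse zeroed waiters from the system's empty list while possible; once it runs dry,
+  * drop the spinlock and fall back to heap allocation, re-taking the lock afterwards. */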
+ while (i < max_num_waiters) {
+ if (MALI_FALSE == do_alloc) {
+ waiter = mali_timeline_system_get_zeroed_waiter(system);
+ if (NULL == waiter) {
+ do_alloc = MALI_TRUE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ continue;
+ }
+ } else {
+ waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+ if (NULL == waiter) break;
+ }
+ ++i;
+ if (NULL == *tail) {
+ *tail = waiter;
+ *head = waiter;
+ } else {
+ (*head)->tracker_next = waiter;
+ *head = waiter;
+ }
+ }
+ if (MALI_TRUE == do_alloc) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ }
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are released.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail Tail of the list of pre-allocated waiters.
+ * @param waiter_head Head of the list of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ struct mali_timeline_waiter *waiter_tail,
+ struct mali_timeline_waiter *waiter_head)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence = NULL;
+#else
+ struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+ /* Create a waiter object for each timeline the fence has a point on, and insert it
+ * into that timeline's sorted list of waiters. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ struct mali_timeline_waiter *waiter;
+
+ /* Get point on current timeline from tracker's fence. */
+ point = tracker->fence.points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline so we don't need a waiter. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+ point, timeline->point_oldest, timeline->point_next));
+ continue;
+ }
+
+ if (likely(mali_timeline_is_point_released(timeline, point))) {
+ /* Tracker representing the point has been released so we don't need a
+ * waiter. */
+ continue;
+ }
+
+ /* The point is on timeline. */
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+ /* Get a new zeroed waiter object. */
+ if (likely(NULL != waiter_tail)) {
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ continue;
+ }
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = point;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Add waiter to timeline. */
+ mali_timeline_insert_waiter(timeline, waiter);
+ }
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (-1 != tracker->fence.sync_fd) {
+ int ret;
+ struct mali_timeline_waiter *waiter;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+#else
+ sync_fence = mali_internal_sync_fence_fdget(tracker->fence.sync_fd);
+#endif
+ if (unlikely(NULL == sync_fence)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+ goto exit;
+ }
+
+ /* Check if we have a zeroed waiter object available. */
+ if (unlikely(NULL == waiter_tail)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ goto exit;
+ }
+
+ /* Start an asynchronous wait that will release the waiter when the fence is signaled. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+ ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+#else
+ mali_internal_sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+ ret = mali_internal_sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+#endif
+ if (1 == ret) {
+ /* Fence already signaled, no waiter needed. */
+ tracker->fence.sync_fd = -1;
+ goto exit;
+ } else if (0 != ret) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ goto exit;
+ }
+
+ /* Grab new zeroed waiter object. */
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Also store waiter in separate field for easy access by sync callback. */
+ tracker->waiter_sync = waiter;
+
+ /* Store the sync fence in the tracker so it can be retrieved when aborting the session, if needed. */
+ tracker->sync_fence = sync_fence;
+
+ sync_fence = NULL;
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)*/
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if ((NULL != tracker->timeline) && (MALI_TIMELINE_PP == tracker->timeline->id)) {
+
+ struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+
+ if (0 < job->dma_fence_context.num_dma_fence_waiter) {
+ struct mali_timeline_waiter *waiter;
+ /* Check if we have a zeroed waiter object available. */
+ if (unlikely(NULL == waiter_tail)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ goto exit;
+ }
+
+ /* Grab new zeroed waiter object. */
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Also store waiter in separate field for easy access by sync callback. */
+ tracker->waiter_dma_fence = waiter;
+ }
+ }
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE)*/
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+exit:
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ if (NULL != waiter_tail) {
+ mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+ }
+
+ /* Release the initial trigger ref count. */
+ tracker->trigger_ref_count--;
+
+ /* If no waiters were added to this tracker, activate it immediately. */
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (NULL != sync_fence) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_put(sync_fence);
+#else
+ fput(sync_fence->file);
+#endif
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id)
+{
+ int num_waiters = 0;
+ struct mali_timeline_waiter *waiter_tail, *waiter_head;
+ u32 tid = _mali_osk_get_tid();
+
+ mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->system = system;
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if (MALI_TIMELINE_PP == timeline_id) {
+ struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+ if (0 < job->dma_fence_context.num_dma_fence_waiter)
+ num_waiters++;
+ }
+#endif
+
+ /* Allocate waiters. */
+ mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If
+ * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+ * point will be MALI_TIMELINE_NO_POINT.
+ *
+ * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the
+ * point will be MALI_TIMELINE_NO_POINT. */
+ MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+ if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+ struct mali_timeline *timeline = system->timelines[timeline_id];
+ mali_timeline_insert_tracker(timeline, tracker);
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ }
+
+ point = tracker->point;
+
+ /* Create waiters for tracker based on supplied fence. Each waiter will increase the
+ * trigger ref count. */
+ mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+ tracker = NULL;
+
+ /* At this point the tracker object might have been freed so we should no longer
+ * access it. */
+
+
+ /* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+ * returned. */
+ return point;
+}
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter)
+{
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker = waiter->tracker;
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ /* At this point the waiter has been removed from the timeline's waiter list, but it is
+ * still on the tracker's waiter list. All of the tracker's waiters will be released when
+ * the tracker is activated. */
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = NULL;
+
+ tracker->trigger_ref_count--;
+ if (0 == tracker->trigger_ref_count) {
+ /* This was the last waiter; activate tracker */
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ return schedule_mask;
+}
+
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id)
+{
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ if (MALI_TIMELINE_MAX <= timeline_id) {
+ return MALI_TIMELINE_NO_POINT;
+ }
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ timeline = system->timelines[timeline_id];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ point = MALI_TIMELINE_NO_POINT;
+ if (timeline->point_oldest != timeline->point_next) {
+ point = timeline->point_next - 1;
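+ /* Step past the reserved MALI_TIMELINE_NO_POINT value if the counter has just wrapped. */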
+ if (MALI_TIMELINE_NO_POINT == point) point--;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return point;
+}
+
+void mali_timeline_initialize(void)
+{
+ _mali_osk_atomic_init(&gp_tracker_count, 0);
+ _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+ _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+}
+
+void mali_timeline_terminate(void)
+{
+ _mali_osk_atomic_term(&gp_tracker_count);
+ _mali_osk_atomic_term(&phy_pp_tracker_count);
+ _mali_osk_atomic_term(&virt_pp_tracker_count);
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+ timeline = tracker->timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline->system);
+ system = timeline->system;
+
+ if (MALI_TIMELINE_MAX > id) {
+ if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+ } else {
+ return MALI_FALSE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+ return MALI_FALSE;
+ }
+}
+
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ return "GP";
+ case MALI_TIMELINE_PP:
+ return "PP";
+ case MALI_TIMELINE_SOFT:
+ return "SOFT";
+ default:
+ return "NONE";
+ }
+}
+
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+ switch (type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ return "GP";
+ case MALI_TIMELINE_TRACKER_PP:
+ return "PP";
+ case MALI_TIMELINE_TRACKER_SOFT:
+ return "SOFT";
+ case MALI_TIMELINE_TRACKER_WAIT:
+ return "WAIT";
+ case MALI_TIMELINE_TRACKER_SYNC:
+ return "SYNC";
+ default:
+ return "INVALID";
+ }
+}
+
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ timeline = tracker->timeline;
+
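+ /* Derive the state in priority order: still waiting on triggers, then linked on a
+  * timeline (active), then on a timeline but without a point (init), otherwise finished. */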
+ if (0 != tracker->trigger_ref_count) {
+ return MALI_TIMELINE_TS_WAITING;
+ }
+
+ if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+ return MALI_TIMELINE_TS_ACTIVE;
+ }
+
+ if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+ return MALI_TIMELINE_TS_INIT;
+ }
+
+ return MALI_TIMELINE_TS_FINISH;
+}
+
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
+{
+ const char *tracker_state = "IWAF";
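+ /* One character per tracker state, indexed by the value returned from
+  * mali_timeline_debug_get_tracker_state(). */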
+ char state_char = 'I';
+ char tracker_type[32] = {0};
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (0 != tracker->trigger_ref_count) {
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job));
+ else
+ MALI_DEBUG_PRINT(2, ("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+ } else {
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job));
+ else
+ MALI_DEBUG_PRINT(2, ("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+
+ }
+#else
+ if (0 != tracker->trigger_ref_count) {
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ (unsigned int)(uintptr_t)(tracker->job));
+ else
+ MALI_DEBUG_PRINT(2, ("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ (unsigned int)(uintptr_t)(tracker->job)));
+ } else {
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ (unsigned int)(uintptr_t)(tracker->job));
+ else
+ MALI_DEBUG_PRINT(2, ("TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ (unsigned int)(uintptr_t)(tracker->job)));
+
+ }
+#endif
+}
+
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
+{
+ struct mali_timeline_tracker *tracker = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker = timeline->tracker_tail;
+ while (NULL != tracker) {
+ mali_timeline_debug_print_tracker(tracker, print_ctx);
+ tracker = tracker->timeline_next;
+ }
+}
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker)
+{
+ const char *tracker_state = "IWAF";
+ char state_char = 'I';
+ char tracker_type[32] = {0};
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (0 != tracker->trigger_ref_count) {
+ MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+ } else {
+ MALI_PRINT(("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+ }
+#else
+ if (0 != tracker->trigger_ref_count) {
+ MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->job));
+ } else {
+ MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->job));
+ }
+#endif
+}
+
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline)
+{
+ struct mali_timeline_tracker *tracker = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker = timeline->tracker_tail;
+ while (NULL != tracker) {
+ mali_timeline_debug_direct_print_tracker(tracker);
+ tracker = tracker->timeline_next;
+ }
+}
+
+#endif
+
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
+{
+ int i;
+ int num_printed = 0;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Print all timelines */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL == timeline->tracker_head) continue;
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+ timeline_id_to_string((enum mali_timeline_id)i));
+ else
+ MALI_DEBUG_PRINT(2, ("TL: Timeline %s: oldest (%u) next(%u)\n",
+ timeline_id_to_string((enum mali_timeline_id)i), timeline->point_oldest, timeline->point_next));
+
+ mali_timeline_debug_print_timeline(timeline, print_ctx);
+ num_printed++;
+ }
+
+ if (0 == num_printed) {
+ if (print_ctx)
+ _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
+ else
+ MALI_DEBUG_PRINT(2, ("TL: All timelines empty\n"));
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+void mali_timeline_dma_fence_callback(void *pp_job_ptr)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ struct mali_pp_job *pp_job = (struct mali_pp_job *)pp_job_ptr;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+
+ tracker = &pp_job->tracker;
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ waiter = tracker->waiter_dma_fence;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ is_aborting = system->session->is_aborting;
+
+ /* If aborting, wake up sleepers that are waiting for dma fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ if (!is_aborting) {
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.h b/drivers/gpu/arm/utgard/common/mali_timeline.h
new file mode 100644
index 000000000000..ba515c336716
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+#include <linux/version.h>
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+ MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */
+ MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */
+ MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+ MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+ MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */
+ MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */
+ MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+ MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+ MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+ MALI_TIMELINE_TRACKER_MAX = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent the absence of a point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline. A timeline is considered full if the difference
+ * between the oldest and newest points is equal to or larger than this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+ mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+ s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */
+};
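+
+/*
+ * Example (sketch, mirroring the pattern used by mali_timeline_sync_fence.c in
+ * this patch): a timeline fence is zeroed (so every points[] entry starts out
+ * as MALI_TIMELINE_NO_POINT), given sync_fd = -1 when no external sync fence is
+ * involved, and then populated with the points it should depend on. The names
+ * "fence" and "point" below are local placeholders.
+ *
+ * @code
+ * struct mali_timeline_fence fence;
+ *
+ * _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ * fence.sync_fd = -1;                      // no sync fence dependency
+ * fence.points[MALI_TIMELINE_PP] = point;  // depend on one PP timeline point
+ * @endcode
+ */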
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+ struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+ /* Single-linked list of unused waiter objects, chained through the waiter's tracker_next field. */
+ struct mali_timeline_waiter *waiter_empty_list;
+
+ struct mali_session_data *session; /**< Session that owns this system. */
+
+ mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+ _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#else
+ struct mali_internal_sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+};
+
+/**
+ * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+ mali_timeline_point point_next; /**< The next available point. */
+ mali_timeline_point point_oldest; /**< The oldest point not released. */
+
+ /* Double-linked list of trackers. Sorted in ascending order by tracker->time_number with
+ * tail pointing to the tracker with the oldest time. */
+ struct mali_timeline_tracker *tracker_head;
+ struct mali_timeline_tracker *tracker_tail;
+
+ /* Double-linked list of waiters. Sorted in ascending order by waiter->time_number_wait
+ * with tail pointing to the waiter with oldest wait time. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+ struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */
+ enum mali_timeline_id id; /**< Timeline type. */
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. */
+#else
+ struct mali_internal_sync_timeline *sync_tl;
+#endif
+ mali_bool destroyed;
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ /* The following fields are used to time out soft job trackers. */
+ _mali_osk_wq_delayed_work_t *delayed_work;
+ mali_bool timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+ mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */
+ struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */
+
+ struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */
+ struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+ struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+ MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+ mali_timeline_point point; /**< Point on timeline for this tracker */
+
+ struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */
+ struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+ u32 trigger_ref_count; /**< When zero tracker will be activated */
+ mali_timeline_activation_error activation_error; /**< Activation error. */
+ struct mali_timeline_fence fence; /**< Fence used to create this tracker */
+
+ /* Single-linked list of waiters. Sorted in order of insertion with
+ * tail pointing to first waiter. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ /* These are only used if the tracker is waiting on a sync fence. */
+ struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+ struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
+#else
+ struct mali_internal_sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+ struct mali_internal_sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
+#endif
+ _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct mali_timeline_waiter *waiter_dma_fence; /**< A direct pointer to timeline waiter representing dma fence. */
+#endif
+
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */
+ enum mali_timeline_tracker_type type; /**< Type of tracker. */
+ void *job; /**< Owner of tracker. */
+
+ /* The following fields are used to time out soft job trackers. */
+ unsigned long os_tick_create;
+ unsigned long os_tick_activate;
+ mali_bool timer_active;
+};
+
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is. Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero. Remember that all of these calculations are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in. The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ *                            [ point is in forbidden zone ]
+ *                                       64k wide
+ *                            MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ * [ point is on timeline    )                              ( point is released ]
+ *
+ * 0--------------------------##############################--------------------2^32 - 1
+ * ^                          ^
+ * \                          |
+ *  oldest point on timeline  |
+ *                            \
+ *                             next point on timeline
+ */
+
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported, and
+ * the result will be correct if the points are less than UINT_MAX/2 apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+ return 0 > ((s32)b) - ((s32)a);
+}
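+
+/*
+ * Example (sketch): the comparison is made on the signed difference, so it stays
+ * correct across the 32-bit wrap as long as the points are within UINT_MAX/2 of
+ * each other:
+ *
+ * @code
+ * mali_timeline_point_after(10, 5);          // MALI_TRUE:  10 is after 5
+ * mali_timeline_point_after(5, 10);          // MALI_FALSE: 5 is before 10
+ * mali_timeline_point_after(1, 0xFFFFFFFF);  // MALI_TRUE:  1 is two steps after 0xFFFFFFFF
+ * @endcode
+ */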
+
+/**
+ * Check if a point is on timeline. A point is on a timeline if it is greater than, or equal to,
+ * the oldest point, and less than the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Check if a point has been released. A point is released if it is older than the oldest point on
+ * the timeline, newer than the next point, and also not in the forbidden zone.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ mali_timeline_point point_normalized;
+ mali_timeline_point next_normalized;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ point_normalized = point - timeline->point_oldest;
+ next_normalized = timeline->point_next - timeline->point_oldest;
+
+ return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
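+
+/*
+ * Worked example (sketch, with made-up values): assume point_oldest == 10 and
+ * point_next == 20. For point == 15, (15 - 10) == 5 is less than
+ * (20 - 10) == 10, so the point is on the timeline. For point == 5, which has
+ * already been released, (5 - 10) wraps to 0xFFFFFFFB, which is larger than
+ * 10 + MALI_TIMELINE_MAX_POINT_SPAN, so the point is reported as released.
+ */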
+
+/**
+ * Check if a point is valid. A point is valid if it is on the timeline or has been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest.
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return timeline->point_next == timeline->point_oldest;
+}
+
+/**
+ * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref
+ * MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be
+ * activated before this function returns. Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ * MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job);
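+
+/*
+ * Example (sketch, following the pattern used by the fence wait and sync fence
+ * trackers in this patch): initialize a tracker and hand it over to the
+ * timeline system. "system", "tracker", "fence" and "job" are local
+ * placeholders.
+ *
+ * @code
+ * mali_timeline_tracker_init(&tracker, MALI_TIMELINE_TRACKER_WAIT, &fence, job);
+ * point = mali_timeline_system_add_tracker(system, &tracker, MALI_TIMELINE_NONE);
+ * // With MALI_TIMELINE_NONE the returned point is MALI_TIMELINE_NO_POINT.
+ * @endcode
+ */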
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
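+
+/*
+ * Example (sketch): the trigger reference taken by _get() keeps the tracker from
+ * activating while an external dependency is being set up; _put() drops it, and
+ * a non-empty returned mask must then be scheduled. The scheduling call below
+ * mirrors the mali_executor_schedule_from_mask() usage seen elsewhere in this
+ * patch; "system" and "tracker" are placeholders.
+ *
+ * @code
+ * mali_timeline_system_tracker_get(system, tracker);
+ * // ... register an asynchronous dependency, e.g. a sync fence callback ...
+ * schedule_mask = mali_timeline_system_tracker_put(system, tracker,
+ *                                                  MALI_TIMELINE_ACTIVATION_ERROR_NONE);
+ * mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ * @endcode
+ */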
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracked is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
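+
+/*
+ * Example (sketch, mirroring mali_timeline_fence_wait_activate() later in this
+ * patch): release a tracker that nothing can wait on, so the returned mask is
+ * expected to be empty; a non-empty mask would have to be scheduled as the
+ * warning above describes.
+ *
+ * @code
+ * schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+ * MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+ * MALI_IGNORE(schedule_mask);
+ * @endcode
+ */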
+
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+ struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+ tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+void mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
+
+#if defined(DEBUG)
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state. Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+ MALI_TIMELINE_TS_INIT = 0,
+ MALI_TIMELINE_TS_WAITING = 1,
+ MALI_TIMELINE_TS_ACTIVE = 2,
+ MALI_TIMELINE_TS_FINISH = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline);
+#endif
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+/**
+ * Timeline dma fence callback, invoked when the dma fence is signaled.
+ *
+ * @param pp_job_ptr Pointer to the PP job linked to the signaled dma fence.
+ */
+void mali_timeline_dma_fence_callback(void *pp_job_ptr);
+#endif
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c
new file mode 100644
index 000000000000..9c82354b1468
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/file.h>
+#include "mali_timeline_fence_wait.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+ return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ _mali_osk_atomic_term(&wait->refcount);
+ _mali_osk_free(wait);
+}
+
+/**
+ * Check if fence wait tracker has been activated. Used as a wait queue condition.
+ *
+ * @param data Fence waiter.
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+
+ wait = (struct mali_timeline_fence_wait_tracker *) data;
+ MALI_DEBUG_ASSERT_POINTER(wait);
+
+ return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence = NULL;
+#else
+ struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ mali_timeline_point point;
+
+ point = fence->points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+ }
+
+ if (!mali_timeline_is_point_released(timeline, point)) {
+ ret = MALI_FALSE;
+ goto exit;
+ }
+ }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (-1 != fence->sync_fd) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+#else
+ sync_fence = mali_internal_sync_fence_fdget(fence->sync_fd);
+#endif
+ if (likely(NULL != sync_fence)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ if (0 == sync_fence->status) {
+#else
+ if (0 == atomic_read(&sync_fence->status)) {
+#endif
+ ret = MALI_FALSE;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+ }
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+exit:
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (NULL != sync_fence) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_put(sync_fence);
+#else
+ fput(sync_fence->file);
+#endif
+ }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ return ret;
+}
+
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+ mali_timeline_point point;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+ return mali_timeline_fence_wait_check_status(system, fence);
+ }
+
+ wait = mali_timeline_fence_wait_tracker_alloc();
+ if (unlikely(NULL == wait)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+ return MALI_FALSE;
+ }
+
+ wait->activated = MALI_FALSE;
+ wait->system = system;
+
+ /* Initialize refcount to two references. The first reference will be released by this
+ * function after the wait is over. The second reference will be released when the tracker
+ * is activated. */
+ _mali_osk_atomic_init(&wait->refcount, 2);
+
+ /* Add tracker to timeline system, but not to a timeline. */
+ mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+ point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+ MALI_IGNORE(point);
+
+ /* Wait for the tracker to be activated or time out. */
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+ } else {
+ _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+ }
+
+ ret = wait->activated;
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+
+ return ret;
+}
+
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+ wait->activated = MALI_TRUE;
+
+ _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+ MALI_IGNORE(schedule_mask);
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h
new file mode 100644
index 000000000000..46828c7b485f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency. We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ */
+struct mali_timeline_fence_wait_tracker {
+ mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+ _mali_osk_atomic_t refcount; /**< Reference count. */
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled or for the timeout to be reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
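+
+/*
+ * Example (sketch): poll a fence without blocking, then fall back to a bounded
+ * wait. "system" and "fence" are placeholders for objects created elsewhere,
+ * and the 100 ms timeout is arbitrary.
+ *
+ * @code
+ * if (MALI_FALSE == mali_timeline_fence_wait(system, fence,
+ *                                            MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY)) {
+ *         // Not signaled yet; block for at most 100 ms.
+ *         mali_timeline_fence_wait(system, fence, 100);
+ * }
+ * @endcode
+ */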
+
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence waiter tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c
new file mode 100644
index 000000000000..91e1c7f7bc67
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/file.h>
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/**
+ * Creates a sync fence tracker and a sync fence. Adds sync fence tracker to Timeline system and
+ * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+#else
+static struct mali_internal_sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+#endif
+{
+ struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence;
+#else
+ struct mali_internal_sync_fence *sync_fence;
+#endif
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ /* Allocate sync fence tracker. */
+ sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+ if (NULL == sync_fence_tracker) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+ return NULL;
+ }
+
+ /* Create sync flag. */
+ MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+ sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+ if (NULL == sync_fence_tracker->flag) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Create sync fence from sync flag. */
+ sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+ mali_sync_flag_put(sync_fence_tracker->flag);
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Setup fence for tracker. */
+ _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ fence.sync_fd = -1;
+ fence.points[timeline->id] = point;
+
+ /* Finally, add the tracker to Timeline system. */
+ mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+ point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+
+ return sync_fence;
+}
+
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ u32 i;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence_acc = NULL;
+#else
+ struct mali_internal_sync_fence *sync_fence_acc = NULL;
+#endif
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence;
+#else
+ struct mali_internal_sync_fence *sync_fence;
+#endif
+ if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ /* Merge sync fences. */
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ /* This was the first sync fence created. */
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (-1 != fence->sync_fd) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence;
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+#else
+ struct mali_internal_sync_fence *sync_fence;
+ sync_fence = mali_internal_sync_fence_fdget(fence->sync_fd);
+#endif
+
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (NULL == sync_fence_acc) {
+ MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+ /* There was nothing to wait on, so return an already signaled fence. */
+
+ sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+ if (NULL == sync_fence_acc) goto error;
+ }
+
+ /* Return file descriptor for the accumulated sync fence. */
+ return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+ if (NULL != sync_fence_acc) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_put(sync_fence_acc);
+#else
+ fput(sync_fence_acc->file);
+#endif
+ }
+
+ return -1;
+}
+
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+ /* Signal flag and release reference. */
+ mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+ mali_sync_flag_put(sync_fence_tracker->flag);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+
+ _mali_osk_free(sync_fence_tracker);
+}
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h
new file mode 100644
index 000000000000..390647b80e2c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+
+/**
+ * Sync fence tracker.
+ */
+struct mali_timeline_sync_fence_tracker {
+ struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
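+
+/*
+ * Example (sketch): export a single timeline point as a sync fence file
+ * descriptor that can be handed to user space. "system", "timeline_id" and
+ * "point" are placeholders.
+ *
+ * @code
+ * struct mali_timeline_fence fence;
+ * s32 fd;
+ *
+ * _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ * fence.sync_fd = -1;
+ * fence.points[timeline_id] = point;
+ *
+ * fd = mali_timeline_sync_fence_create(system, &fence);
+ * if (0 > fd) {
+ *         // -1 means the sync fence could not be created or merged.
+ * }
+ * @endcode
+ */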
+
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ *
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_ukk.h b/drivers/gpu/arm/utgard/common/mali_ukk.h
new file mode 100644
index 000000000000..55a05c50436a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_ukk.h
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their argument structures from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context);
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context);
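+
+/*
+ * Example (sketch): the expected lifecycle of a context handle, using only the
+ * two calls above. "context" is a local placeholder; the ctx member of each U/K
+ * argument structure must be filled in from it before every call.
+ *
+ * @code
+ * void *context = NULL;
+ *
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&context)) {
+ *         // ... issue U/K calls with args.ctx = context ...
+ *         _mali_ukk_close(&context);
+ * }
+ * @endcode
+ */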
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @brief Make the calling process sleep if the number of pending big jobs in the kernel is >= MALI_MAX_PENDING_BIG_JOB.
+ *
+ */
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() are dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case of _mali_ukk_mem_mmap(), the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determining the number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
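+
+/*
+ * Illustrative call order (editorial addition): the suspension is first
+ * observed through _mali_ukk_wait_for_notification(); the caller then fills a
+ * _mali_uk_gp_suspend_response_s (see mali_utgard_uk_types.h for its fields)
+ * with the values taken from that notification and passes it back:
+ *
+ *	_mali_uk_gp_suspend_response_s resp;	(filled from the notification)
+ *	err = _mali_ukk_gp_suspend_response(&resp);
+ */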
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Get profiling stream fd.
+ *
+ * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args);
+
+/** @brief Profiling control set.
+ *
+ * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_report_total_memory_size(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.c b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c
new file mode 100644
index 000000000000..1911eff87a72
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c
@@ -0,0 +1,147 @@
+/**
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool done = MALI_FALSE;
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+ /* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+ sizeof(_mali_uk_settings_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_settings_changed_s *data;
+ data = notobjs[i]->result_buffer;
+
+ data->setting = setting;
+ data->value = value;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool notify = MALI_FALSE;
+
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", setting));
+ return;
+ }
+
+ if (mali_user_settings[setting] != value) {
+ notify = MALI_TRUE;
+ }
+
+ mali_user_settings[setting] = value;
+
+ if (notify) {
+ mali_user_settings_notify(setting, value);
+ }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ return 0;
+ }
+
+ return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+ _mali_uk_user_setting_t setting;
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ setting = args->setting;
+
+ if (_MALI_UK_USER_SETTING_MAX > setting) {
+ args->value = mali_user_settings[setting];
+ return _MALI_OSK_ERR_OK;
+ } else {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.h b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h
new file mode 100644
index 000000000000..da9c0630e371
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
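+
+/*
+ * Illustrative sketch (editorial addition): updating a setting and reading it
+ * back. The identifier some_setting is a placeholder for any value of
+ * _mali_uk_user_setting_t; running sessions are only notified when the stored
+ * value actually changes.
+ *
+ *	mali_set_user_setting(some_setting, 1);
+ *	u32 value = mali_get_user_setting(some_setting);
+ */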
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h
new file mode 100644
index 000000000000..7df55c951d6f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include "mali_pm_metrics.h"
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#endif
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+
+#define MALI_OFFSET_GP 0x00000
+#define MALI_OFFSET_GP_MMU 0x03000
+
+#define MALI_OFFSET_PP0 0x08000
+#define MALI_OFFSET_PP0_MMU 0x04000
+#define MALI_OFFSET_PP1 0x0A000
+#define MALI_OFFSET_PP1_MMU 0x05000
+#define MALI_OFFSET_PP2 0x0C000
+#define MALI_OFFSET_PP2_MMU 0x06000
+#define MALI_OFFSET_PP3 0x0E000
+#define MALI_OFFSET_PP3_MMU 0x07000
+
+#define MALI_OFFSET_PP4 0x28000
+#define MALI_OFFSET_PP4_MMU 0x1C000
+#define MALI_OFFSET_PP5 0x2A000
+#define MALI_OFFSET_PP5_MMU 0x1D000
+#define MALI_OFFSET_PP6 0x2C000
+#define MALI_OFFSET_PP6_MMU 0x1E000
+#define MALI_OFFSET_PP7 0x2E000
+#define MALI_OFFSET_PP7_MMU 0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0 0x01000
+#define MALI_OFFSET_L2_RESOURCE1 0x10000
+#define MALI_OFFSET_L2_RESOURCE2 0x11000
+
+#define MALI400_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2
+#define MALI470_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+
+#define MALI_OFFSET_BCAST 0x13000
+#define MALI_OFFSET_DLBU 0x14000
+
+#define MALI_OFFSET_PP_BCAST 0x16000
+#define MALI_OFFSET_PP_BCAST_MMU 0x15000
+
+#define MALI_OFFSET_PMU 0x02000
+#define MALI_OFFSET_DMA 0x12000
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+ /* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+ /* Mali-470 */
+#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+ { \
+ .name = "Mali_L2", \
+ .flags = IORESOURCE_MEM, \
+ .start = addr, \
+ .end = addr + 0x200, \
+ },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_mmu_addr, \
+ .end = gp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_mmu_irq, \
+ .end = gp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+ { \
+ .name = "Mali_PP" #id, \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_addr, \
+ .end = pp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_mmu_irq, \
+ .end = pp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+ { \
+ .name = "Mali_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = mmu_addr, \
+ .end = mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = mmu_irq, \
+ .end = mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+ { \
+ .name = "Mali_PMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pmu_addr, \
+ .end = pmu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DMA(dma_addr) \
+ { \
+ .name = "Mali_DMA", \
+ .flags = IORESOURCE_MEM, \
+ .start = dma_addr, \
+ .end = dma_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+ { \
+ .name = "Mali_DLBU", \
+ .flags = IORESOURCE_MEM, \
+ .start = dlbu_addr, \
+ .end = dlbu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+ { \
+ .name = "Mali_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = bcast_addr, \
+ .end = bcast_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_Broadcast_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+ { \
+ .name = "Mali_PP_MMU_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_bcast_addr, \
+ .end = pp_mmu_bcast_addr + 0x100, \
+ },
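+
+/*
+ * Illustrative sketch (editorial addition): a platform file would typically
+ * expand one of the aggregate MALI_GPU_RESOURCES_* macros above into the
+ * resource table of the "mali-utgard" platform device. The base address and
+ * the IRQ identifiers below are hypothetical placeholders.
+ *
+ *	static struct resource mali_gpu_resources[] = {
+ *		MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000,
+ *			gp_irq, gp_mmu_irq,
+ *			pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq,
+ *			pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq,
+ *			pp_bcast_irq)
+ *	};
+ */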
+
+ struct mali_gpu_utilization_data {
+ unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+ };
+
+ struct mali_gpu_clk_item {
+ unsigned int clock; /* unit(MHz) */
+ unsigned int vol;
+ };
+
+ struct mali_gpu_clock {
+ struct mali_gpu_clk_item *item;
+ unsigned int num_of_steps;
+ };
+
+ struct mali_gpu_device_data {
+ /* Shared GPU memory */
+ unsigned long shared_mem_size;
+
+ /*
+ * Mali PMU switch delay.
+ * Only needed if the power gates are connected to the PMU in a high fanout
+ * network. This value is the number of Mali clock cycles it takes to
+ * enable the power gates and turn on the power mesh.
+ * This value will have no effect if a daisy chain implementation is used.
+ */
+ u32 pmu_switch_delay;
+
+ /* Mali Dynamic power domain configuration in sequence from 0-11
+ * GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7, L2$0 L2$1 L2$2
+ */
+ u16 pmu_domain_config[12];
+
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
+
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+
+ /* Max runtime [ms] for jobs */
+ int max_job_runtime;
+
+ /* Report GPU utilization and related control in this interval (specified in ms) */
+ unsigned long control_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+ /* Platform callback used by the driver to set the clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*set_freq)(int setting_clock_step);
+ /* Function through which the platform reports the clock steps the driver may set; needed when CONFIG_MALI_DVFS is enabled */
+ void (*get_clock_info)(struct mali_gpu_clock **data);
+ /* Function that returns the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*get_freq)(void);
+ /* Function that initializes the Mali GPU secure mode */
+ int (*secure_mode_init)(void);
+ /* Function that deinitializes the Mali GPU secure mode */
+ void (*secure_mode_deinit)(void);
+ /* Function that resets the GPU and enables GPU secure mode */
+ int (*gpu_reset_and_secure_mode_enable)(void);
+ /* Function that resets the GPU and disables GPU secure mode */
+ int (*gpu_reset_and_secure_mode_disable)(void);
+ /* IPA-related interface that the customer needs to register */
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+ struct devfreq_cooling_power *gpu_cooling_ops;
+#endif
+ };
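+
+ /*
+  * Illustrative sketch (editorial addition): a minimal platform-data
+  * initializer using a few of the fields above. All values and the callback
+  * name are hypothetical.
+  *
+  *	static void sample_utilization_cb(struct mali_gpu_utilization_data *data)
+  *	{
+  *		(void)data;	(e.g. feed a platform DVFS governor)
+  *	}
+  *
+  *	static struct mali_gpu_device_data mali_gpu_data = {
+  *		.shared_mem_size = 256 * 1024 * 1024,
+  *		.max_job_runtime = 60000,
+  *		.control_interval = 1000,
+  *		.utilization_callback = sample_utilization_cb,
+  *	};
+  */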
+
+ /**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+ void mali_dev_pause(void);
+
+ /**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+ void mali_dev_resume(void);
+
+ /** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+ int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644
index 000000000000..70729e9c6e0a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_utgard_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2 _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
+#define MALI_IOC_PENDING_SUBMIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_PENDING_SUBMIT, _mali_uk_pending_submit_s)
+
+#define MALI_IOC_MEM_ALLOC _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s)
+#define MALI_IOC_MEM_FREE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s)
+#define MALI_IOC_MEM_BIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s)
+#define MALI_IOC_MEM_UNBIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s)
+#define MALI_IOC_MEM_COW _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s)
+#define MALI_IOC_MEM_COW_MODIFY_RANGE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s)
+#define MALI_IOC_MEM_RESIZE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_RESIZE_MEM, _mali_uk_mem_resize_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
+
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
+
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s)
+
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
+#define MALI_IOC_PROFILING_STREAM_FD_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s)
+#define MALI_IOC_PROILING_CONTROL_SET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
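+
+/*
+ * Illustrative user-space sketch (editorial addition): issuing one of the
+ * ioctls above. The device node path "/dev/mali" and the surrounding error
+ * handling are assumptions, not something this header defines.
+ *
+ *	int fd = open("/dev/mali", O_RDWR);
+ *	_mali_uk_get_pp_number_of_cores_s args = { 0 };
+ *	if (fd >= 0 && 0 == ioctl(fd, MALI_IOC_PP_NUMBER_OF_CORES_GET, &args)) {
+ *		(the returned core counts are described in mali_utgard_uk_types.h)
+ *	}
+ */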
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644
index 000000000000..17d31de931d0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32-bit value consisting of the following fields:
+ *  - reserved, 4 bits, for future use
+ *  - event type, 4 bits, cinstr_profiling_event_type_t
+ *  - event channel, 8 bits, the source of the event
+ *  - event data, 16 bits, data depending on the event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+ MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+ MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+ MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+ MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
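+
+/*
+ * Illustrative sketch (editorial addition): composing a 32-bit event ID from
+ * the fields described above, here a "start" event on the software channel
+ * with a bottom-half reason in the low 16 bits.
+ *
+ *	u32 event_id = MALI_PROFILING_EVENT_TYPE_START |
+ *		       MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ *		       MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF;
+ */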
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE = 5,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE = 6,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK = 7,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK = 8,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS = 13,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55,
+ MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED = 56,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP = 57,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE = 58,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC = 59,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH = 62,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL = 0,
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum {
+ /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,*/
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI = 1,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD = 3,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF = 4,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26, /* used in some build configurations */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE = 33, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK = 36, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK = 37, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP = 38, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING = 40, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING = 41, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE = 42, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_SUBMIT_LIMITER_WAIT = 49, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_DATA_CORE_GP0 = 1,
+ MALI_PROFILING_EVENT_DATA_CORE_PP0 = 5,
+ MALI_PROFILING_EVENT_DATA_CORE_PP1 = 6,
+ MALI_PROFILING_EVENT_DATA_CORE_PP2 = 7,
+ MALI_PROFILING_EVENT_DATA_CORE_PP3 = 8,
+ MALI_PROFILING_EVENT_DATA_CORE_PP4 = 9,
+ MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
+ MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
+ MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+ MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU = 22, /* GP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU = 26, /* PP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU = 27, /* PP1 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU = 28, /* PP2 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU = 29, /* PP3 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU = 30, /* PP4 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU = 31, /* PP5 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU = 32, /* PP6 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU = 33, /* PP7 + 21 */
+
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100644
index 000000000000..c1927d1450dc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2013, 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \
+ { \
+ "Filmstrip_cnt0", \
+ "Frequency", \
+ "Voltage", \
+ "vertex", \
+ "fragment", \
+ "Total_alloc_pages", \
+ };
+
+#define _MALI_MEM_COUTNER_DESCRIPTIONS \
+ { \
+ "untyped_memory", \
+ "vertex_index_buffer", \
+ "texture_buffer", \
+ "varying_buffer", \
+ "render_target", \
+ "pbuffer_buffer", \
+ "plbu_heap", \
+ "pointer_array_buffer", \
+ "slave_tilelist", \
+ "untyped_gp_cmdlist", \
+ "polygon_cmdlist", \
+ "texture_descriptor", \
+ "render_state_word", \
+ "shader", \
+ "stream_buffer", \
+ "fragment_stack", \
+ "uniform", \
+ "untyped_frame_pool", \
+ "untyped_surface", \
+ };
+
+/** The list of events supported by the Mali DDK. */
+typedef enum {
+ /* Vertex processor activity */
+ ACTIVITY_VP_0 = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP_0,
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+ ACTIVITY_FP_4,
+ ACTIVITY_FP_5,
+ ACTIVITY_FP_6,
+ ACTIVITY_FP_7,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+ COUNTER_L2_1_C0,
+ COUNTER_L2_1_C1,
+ COUNTER_L2_2_C0,
+ COUNTER_L2_2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0,
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+ COUNTER_FP_4_C0,
+ COUNTER_FP_4_C1,
+ COUNTER_FP_5_C0,
+ COUNTER_FP_5_C1,
+ COUNTER_FP_6_C0,
+ COUNTER_FP_6_C1,
+ COUNTER_FP_7_C0,
+ COUNTER_FP_7_C1,
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Special counter */
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+ COUNTER_FREQUENCY,
+ COUNTER_VOLTAGE,
+ COUNTER_VP_ACTIVITY,
+ COUNTER_FP_ACTIVITY,
+ COUNTER_TOTAL_ALLOC_PAGES,
+
+ /* Memory usage counter */
+ COUNTER_MEM_UNTYPED,
+ COUNTER_MEM_VB_IB,
+ COUNTER_MEM_TEXTURE,
+ COUNTER_MEM_VARYING,
+ COUNTER_MEM_RT,
+ COUNTER_MEM_PBUFFER,
+	/* memory usage for GP commands */
+ COUNTER_MEM_PLBU_HEAP,
+ COUNTER_MEM_POINTER_ARRAY,
+ COUNTER_MEM_SLAVE_TILELIST,
+ COUNTER_MEM_UNTYPE_GP_CMDLIST,
+	/* memory usage for polygon list commands */
+ COUNTER_MEM_POLYGON_CMDLIST,
+	/* memory usage for PP commands */
+ COUNTER_MEM_TD,
+ COUNTER_MEM_RSW,
+	/* other memory usage */
+ COUNTER_MEM_SHADER,
+ COUNTER_MEM_STREAMS,
+ COUNTER_MEM_FRAGMENT_STACK,
+ COUNTER_MEM_UNIFORM,
+ /* Special mem usage, which is used for mem pool allocation */
+ COUNTER_MEM_UNTYPE_MEM_POOL,
+ COUNTER_MEM_UNTYPE_SURFACE,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_TOTAL_ALLOC_PAGES
+
+#define FIRST_MEM_COUNTER COUNTER_MEM_UNTYPED
+#define LAST_MEM_COUNTER COUNTER_MEM_UNTYPE_SURFACE
+
+#define MALI_PROFILING_MEM_COUNTERS_NUM (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1)
+#define MALI_PROFILING_SPECIAL_COUNTERS_NUM (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1)
+#define MALI_PROFILING_SW_COUNTERS_NUM (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1)
+
+/**
+ * Define the stream header types for the profiling stream.
+ */
+#define STREAM_HEADER_FRAMEBUFFER 0x05 /* The stream packet header type for framebuffer dumping. */
+#define STREAM_HEADER_COUNTER_VALUE 0x09 /* The stream packet header type for hw/sw/memory counter sampling. */
+#define STREAM_HEADER_CORE_ACTIVITY 0x0a /* The stream packet header type for activity counter sampling. */
+#define STREAM_HEADER_SIZE 5
+
+/**
+ * Define the packet header type of profiling control packet.
+ */
+#define PACKET_HEADER_ERROR 0x80 /* The response packet header type if error. */
+#define PACKET_HEADER_ACK 0x81 /* The response packet header type if OK. */
+#define PACKET_HEADER_COUNTERS_REQUEST 0x82 /* The control packet header type to request counter information from ddk. */
+#define PACKET_HEADER_COUNTERS_ACK 0x83 /* The response packet header type to send out counter information. */
+#define PACKET_HEADER_COUNTERS_ENABLE 0x84 /* The control packet header type to enable counters. */
+#define PACKET_HEADER_START_CAPTURE_VALUE 0x85 /* The control packet header type to start capture values. */
+
+#define PACKET_HEADER_SIZE 5
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters {
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values {
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version {
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/**
+ * Structure to define the mali profiling counter struct.
+ */
+typedef struct mali_profiling_counter {
+ char counter_name[40];
+ u32 counter_id;
+ u32 counter_event;
+ u32 prev_counter_value;
+ u32 current_counter_value;
+ u32 key;
+ int enabled;
+} mali_profiling_counter;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define MEM_COUNTER_ENABLE (5)
+#define ANNOTATE_PROFILING_ENABLE (6)
+
+void _mali_profiling_control(u32 action, u32 value);
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644
index 000000000000..9aa1cf4bb834
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h
@@ -0,0 +1,1090 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Iteration functions depend on these values being consecutive. */
+#define MALI_UK_TIMELINE_GP 0
+#define MALI_UK_TIMELINE_PP 1
+#define MALI_UK_TIMELINE_SOFT 2
+#define MALI_UK_TIMELINE_MAX 3
+
+#define MALI_UK_BIG_VARYING_SIZE (1024*1024*2)
+
+typedef struct {
+ u32 points[MALI_UK_TIMELINE_MAX];
+ s32 sync_fd;
+} _mali_uk_fence_t;
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum {
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum {
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+ _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */
+ _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */
+ _MALI_UK_REQUEST_HIGH_PRIORITY, /**< _mali_ukk_request_high_priority() */
+ _MALI_UK_TIMELINE_GET_LATEST_POINT, /**< _mali_ukk_timeline_get_latest_point() */
+ _MALI_UK_TIMELINE_WAIT, /**< _mali_ukk_timeline_wait() */
+ _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, /**< _mali_ukk_timeline_create_sync_fence() */
+ _MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */
+ _MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */
+ _MALI_UK_PENDING_SUBMIT, /**< _mali_ukk_pending_submit() */
+
+ /** Memory functions */
+
+ _MALI_UK_ALLOC_MEM = 0, /**< _mali_ukk_alloc_mem() */
+ _MALI_UK_FREE_MEM, /**< _mali_ukk_free_mem() */
+ _MALI_UK_BIND_MEM, /**< _mali_ukk_mem_bind() */
+ _MALI_UK_UNBIND_MEM, /**< _mali_ukk_mem_unbind() */
+ _MALI_UK_COW_MEM, /**< _mali_ukk_mem_cow() */
+ _MALI_UK_COW_MODIFY_RANGE, /**< _mali_ukk_mem_cow_modify_range() */
+	_MALI_UK_RESIZE_MEM,                    /**< _mali_ukk_mem_resize() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */
+ _MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+ _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */
+ _MALI_UK_PP_AND_GP_START_JOB, /**< _mali_ukk_pp_and_gp_start_job() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_ADD_EVENT = 0, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+ _MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */
+	_MALI_UK_PROFILING_STREAM_FD_GET,     /**< __mali_uku_profiling_stream_fd_get() */
+	_MALI_UK_PROFILING_CONTROL_SET,       /**< __mali_uku_profiling_control_set() */
+
+	/** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+} _mali_uk_functions;
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ *   you resume it, @c arguments[0] should specify the Mali start address for the new
+ *   heap and @c arguments[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code {
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
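+/*
+ * Minimal usage sketch (illustrative only; 'suspended' stands for a previously
+ * received _mali_uk_gp_job_suspended_s notification, and the heap addresses
+ * are hypothetical placeholders):
+ *
+ *     _mali_uk_gp_suspend_response_s resp = { 0 };
+ *     resp.ctx = ctx;                     // context returned by _mali_ukk_open()
+ *     resp.cookie = suspended.cookie;     // identifies the suspended job
+ *     resp.code = _MALIGP_JOB_RESUME_WITH_NEW_HEAP;
+ *     resp.arguments[0] = new_heap_start; // Mali start address of the new heap
+ *     resp.arguments[1] = new_heap_end;   // Mali end address of the new heap
+ *
+ * The filled-in struct is then handed to _mali_ukk_gp_suspend_response().
+ */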
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+typedef enum {
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1 << (16 + 0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1 << (16 + 1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1 << (16 + 2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1 << (16 + 3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1 << (16 + 4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1 << (16 + 5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1 << (16 + 6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1 << (16 + 7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1 << (16 + 8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u64 timeline_point_ptr; /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
+	u32 varying_memsize;            /**< [in] size of varying memory used for deferred bind */
+	u32 deferred_mem_num;
+	u64 deferred_mem_list;          /**< [in] memory handle list of varying buffers used for deferred bind */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per tile (aka heatmap) generation with for a job (using the enabled counter sources) */
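+/*
+ * Illustrative sketch only: enabling both per-job performance counters in a
+ * _mali_uk_gp_start_job_s request ('job' and the source id values are
+ * hypothetical; see ARM DDI0415A, Table 3-60 for valid source ids):
+ *
+ *     job.perf_counter_flag = _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE |
+ *                             _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE;
+ *     job.perf_counter_src0 = 12;
+ *     job.perf_counter_src1 = 13;
+ */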
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 pending_big_job_num;
+} _mali_uk_gp_job_finished_s;
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1)
+#define _MALI_PP_JOB_FLAG_PROTECTED (1<<2)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+ u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-8(M450) or 4(M400)) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+	u32 flags;                          /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+	u32 tilesx;                         /**< [in] number of tiles in the x direction (needed for heatmap generation) */
+ u32 tilesy; /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
+ u32 heatmap_mem; /**< [in] memory address to store counter values per tile (aka heatmap) */
+ u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */
+ u64 memory_cookies; /**< [in] pointer to array of u32 memory cookies attached to job */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u64 timeline_point_ptr; /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
+} _mali_uk_pp_start_job_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+ u64 pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+} _mali_uk_pp_and_gp_start_job_s;
+
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+	u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+	u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter_src0;
+ u32 perf_counter_src1;
+} _mali_uk_pp_job_finished_s;
+
+typedef struct {
+ u32 number_of_enabled_cores; /**< [out] the new number of enabled cores */
+} _mali_uk_pp_num_cores_changed_s;
+
+
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum {
+ _MALI_UK_PP_JOB_WB0 = 1,
+ _MALI_UK_PP_JOB_WB1 = 2,
+ _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
+ u32 wb0_memory;
+ u32 wb1_memory;
+ u32 wb2_memory;
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+/** @defgroup _mali_uk_soft_job U/K Soft Job
+ * @{ */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job; /**< [in] identifier for the job in user space */
+ u64 job_id_ptr; /**< [in,out] pointer to location of u32 where job id will be written */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u32 point; /**< [out] point on soft timeline for this job */
+ u32 type; /**< [in] type of soft job */
+} _mali_uk_soft_job_start_s;
+
+typedef struct {
+ u64 user_job; /**< [out] identifier for the job in user space */
+} _mali_uk_soft_job_activated_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 job_id; /**< [in] id for soft job */
+} _mali_uk_soft_job_signal_s;
+
+/** @} */ /* end group _mali_uk_soft_job */
+
+typedef struct {
+ u32 counter_id;
+ u32 key;
+ int enable;
+} _mali_uk_annotate_profiling_mem_counter_s;
+
+typedef struct {
+ u32 sampling_rate;
+ int enable;
+} _mali_uk_annotate_profiling_enable_s;
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum {
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+ _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+ _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+ /** Profiling notifications */
+ _MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting up the 32-bit notification value into subsystem and id values */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
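+/*
+ * Illustrative sketch of splitting a notification value with the masks above
+ * ('type' is a hypothetical _mali_uk_notification_type value):
+ *
+ *     u32 subsystem = ((u32)type & _MALI_NOTIFICATION_SUBSYSTEM_MASK) >> _MALI_NOTIFICATION_SUBSYSTEM_SHIFT;
+ *     u32 id        = ((u32)type & _MALI_NOTIFICATION_ID_MASK) >> _MALI_NOTIFICATION_ID_SHIFT;
+ *
+ * For _MALI_NOTIFICATION_GP_STALLED this yields subsystem == _MALI_UK_GP_SUBSYSTEM
+ * and id == 0x20.
+ */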
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ *
+ *
+ */
+typedef enum {
+ _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+ _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+ _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+ _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+ { \
+ "sw_events_enable", \
+ "colorbuffer_capture_enable", \
+ "depthbuffer_capture_enable", \
+ "stencilbuffer_capture_enable", \
+ "per_tile_counters_enable", \
+ "buffer_capture_compositor", \
+ "buffer_capture_window", \
+ "buffer_capture_other", \
+ "buffer_capture_n_frames", \
+ "buffer_capture_resize_factor", \
+ "sw_counters_enable", \
+ };
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct {
+ _mali_uk_user_setting_t setting;
+ u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type value in the type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ *    - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ *    - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ *      didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ *   - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+		_mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+ _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
+ _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter;
+ _mali_uk_annotate_profiling_enable_s profiling_enable;
+ } data;
+} _mali_uk_wait_for_notification_s;
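+/*
+ * Illustrative sketch only: dispatching on the notification type after a
+ * successful wait ('notif' and the handler functions are hypothetical):
+ *
+ *     switch (notif.type) {
+ *     case _MALI_NOTIFICATION_PP_FINISHED:
+ *         handle_pp_finished(&notif.data.pp_job_finished);
+ *         break;
+ *     case _MALI_NOTIFICATION_GP_STALLED:
+ *         respond_to_gp_stall(&notif.data.gp_job_suspended);
+ *         break;
+ *     case _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS:
+ *         // stop waiting for notifications and close the driver connection
+ *         break;
+ *     default:
+ *         break;
+ *     }
+ */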
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16-bit integer incremented on each API change.
+ * The 16-bit integer is stored twice in a 32-bit integer.
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 900
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
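+/*
+ * Worked example, following directly from the macros above: _MALI_API_VERSION
+ * is 900 (0x384), so _MALI_UK_API_VERSION == _MAKE_VERSION_ID(900) ==
+ * (900 << 16) | 900 == 0x03840384, and _GET_VERSION(0x03840384) == 900.
+ */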
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+ u32 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
+
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within a given context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are interpreted relative to the context pointed to by the ctx member.
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_user_setting_t setting; /**< [in] setting to get */
+ u32 value; /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @brief Arguments for _mali_ukk_request_high_priority() */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_request_high_priority_s;
+
+/** @brief Arguments for _mali_ukk_pending_submit() */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_pending_submit_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+#define _MALI_MEMORY_ALLOCATE_RESIZEABLE  (1<<4) /* Buffer can trim down/grow. */
+#define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /* Not mapped to the GPU at allocation time; bind must be called later. */
+#define _MALI_MEMORY_ALLOCATE_SWAPPABLE   (1<<6) /* Allocate swappable memory. */
+#define _MALI_MEMORY_ALLOCATE_DEFER_BIND (1<<7) /* Not mapped to the GPU at allocation time; bind is deferred. */
+#define _MALI_MEMORY_ALLOCATE_SECURE (1<<8) /* Allocate secure memory. */
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 gpu_vaddr; /**< [in] GPU virtual address */
+	u32 vsize;                                  /**< [in] virtual size of the allocation */
+ u32 psize; /**< [in] physical size of the allocation */
+ u32 flags;
+ u64 backend_handle; /**< [out] backend handle */
+	s32 secure_shared_fd;                   /**< [in] memory handle for secure memory */
+} _mali_uk_alloc_mem_s;
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 gpu_vaddr; /**< [in] use as handle to free allocation */
+	u32 free_pages_nr;      /**< [out] number of pages freed */
+} _mali_uk_free_mem_s;
+
+
+#define _MALI_MEMORY_BIND_BACKEND_UMP (1<<8)
+#define _MALI_MEMORY_BIND_BACKEND_DMA_BUF (1<<9)
+#define _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY (1<<10)
+#define _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY (1<<11)
+#define _MALI_MEMORY_BIND_BACKEND_EXT_COW (1<<12)
+#define _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION (1<<13)
+
+
+#define _MALI_MEMORY_BIND_BACKEND_MASK (_MALI_MEMORY_BIND_BACKEND_UMP| \
+ _MALI_MEMORY_BIND_BACKEND_DMA_BUF |\
+ _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY |\
+ _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY |\
+ _MALI_MEMORY_BIND_BACKEND_EXT_COW |\
+ _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION)
+
+
+#define _MALI_MEMORY_GPU_READ_ALLOCATE (1<<16)
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 vaddr; /**< [in] mali address to map the physical memory to */
+ u32 size; /**< [in] size */
+	u32 flags;                                /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+ u32 padding; /** padding for 32/64 struct alignment */
+ union {
+ struct {
+ u32 secure_id; /**< [in] secure id */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_ump;
+ struct {
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_dma_buf;
+ struct {
+ u32 phys_addr; /**< [in] physical address */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_ext_memory;
+ } mem_union;
+} _mali_uk_bind_mem_s;
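+/*
+ * Illustrative sketch only: preparing a dma-buf bind request ('fd', 'mali_va'
+ * and 'buf_size' are hypothetical placeholders):
+ *
+ *     _mali_uk_bind_mem_s args = { 0 };
+ *     args.ctx = ctx;                                  // from _mali_ukk_open()
+ *     args.vaddr = mali_va;                            // Mali address to map to
+ *     args.size = buf_size;
+ *     args.flags = _MALI_MEMORY_BIND_BACKEND_DMA_BUF;
+ *     args.mem_union.bind_dma_buf.mem_fd = fd;         // dma-buf file descriptor
+ *     args.mem_union.bind_dma_buf.rights = 0;
+ *     args.mem_union.bind_dma_buf.flags = 0;
+ */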
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+	u32 flags;                                /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+ u32 vaddr; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_unbind_mem_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+	u32 target_handle;                          /**< [in] handle of the allocation to COW */
+	u32 target_offset;                          /**< [in] offset within the target allocation to COW (supports COW of memory allocated from a memory bank; PAGE_SIZE aligned) */
+	u32 target_size;                            /**< [in] size, in bytes, of the target allocation to COW (PAGE_SIZE aligned) */
+	u32 range_start;                            /**< [in] start offset of the range to reallocate, relative to the start of the allocation (PAGE_SIZE aligned) */
+	u32 range_size;                             /**< [in] size of the range to reallocate (PAGE_SIZE aligned) */
+	u32 vaddr;                                  /**< [in] Mali address for the new allocation */
+ u32 backend_handle; /**< [out] backend handle */
+ u32 flags;
+} _mali_uk_cow_mem_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+	u32 range_start;                            /**< [in] start offset of the range to reallocate, relative to the start of the allocation */
+	u32 size;                                   /**< [in] size of the range to reallocate */
+	u32 vaddr;                                  /**< [in] Mali address for the new allocation */
+	s32 change_pages_nr;                        /**< [out] page count change resulting from the COW operation */
+} _mali_uk_cow_modify_range_s;
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+	u64 vaddr;                                  /**< [in] the buffer to resize */
+	u32 psize;                                  /**< [in] requested physical size of this memory */
+} _mali_uk_mem_resize_s;
+
+/**
+ * @brief Arguments for _mali_uk[uk]_mem_write_safe()
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 src; /**< [in] Pointer to source data */
+ u64 dest; /**< [in] Destination Mali buffer */
+ u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ u64 buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u64 register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u64 page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */
+ u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+ u32 padding;
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 memory_usage; /**< [out] total memory usage */
+	u32 vaddr;                                /**< [in] Mali address for the COW allocation */
+	s32 change_pages_nr;                      /**< [out] page count change resulting from the COW operation */
+} _mali_uk_profiling_memory_usage_get_s;
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or non-MMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ mali_bool writeable;
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue with the next frame.
+ */
+typedef enum _mali_uk_vsync_event {
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+	_mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 counters; /**< [in] The array of u32 counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_timeline U/K Mali Timeline
+ * @{ */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 timeline; /**< [in] timeline id */
+ u32 point; /**< [out] latest point on timeline */
+} _mali_uk_timeline_get_latest_point_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] fence */
+ u32 timeout; /**< [in] timeout (0 for no wait, -1 for blocking) */
+ u32 status; /**< [out] status of fence (1 if signaled, 0 if timeout) */
+} _mali_uk_timeline_wait_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] mali fence to create linux sync fence from */
+ s32 sync_fd; /**< [out] file descriptor for new linux sync fence */
+} _mali_uk_timeline_create_sync_fence_s;
+
+/** @} */ /* end group _mali_uk_timeline */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ s32 stream_fd; /**< [in] The profiling kernel base stream fd handle */
+} _mali_uk_profiling_stream_fd_get_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 control_packet_data; /**< [in] the control packet data for control settings */
+ u32 control_packet_size; /**< [in] The control packet size */
+	u64 response_packet_data; /**< [out] The response packet data */
+	u32 response_packet_size; /**< [in,out] The response packet size */
+} _mali_uk_profiling_control_set_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 000000000000..6fafc6777e48
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_devfreq.c b/drivers/gpu/arm/utgard/linux/mali_devfreq.c
new file mode 100644
index 000000000000..b28f489e2cbf
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_devfreq.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#endif /* Linux >= 3.13 */
+
+#include "mali_pm_metrics.h"
+
+static int
+mali_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+ struct mali_device *mdev = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long freq = 0;
+ unsigned long voltage;
+ int err;
+
+ freq = *target_freq;
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, &freq, flags);
+ if (IS_ERR_OR_NULL(opp)) {
+ rcu_read_unlock();
+ MALI_PRINT_ERROR(("Failed to get opp (%ld)\n", PTR_ERR(opp)));
+ return PTR_ERR(opp);
+ }
+ /* Only read the OPP voltage once the OPP lookup is known to have succeeded. */
+ voltage = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ MALI_DEBUG_PRINT(2, ("mali_devfreq_target:set_freq = %lld flags = 0x%x\n", freq, flags));
+ /*
+ * Only update if there is a change of frequency
+ */
+ if (mdev->current_freq == freq) {
+ *target_freq = freq;
+ mali_pm_reset_dvfs_utilisation(mdev);
+ return 0;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (mdev->regulator && mdev->current_voltage != voltage
+ && mdev->current_freq < freq) {
+ err = regulator_set_voltage(mdev->regulator, voltage, voltage);
+ if (err) {
+ MALI_PRINT_ERROR(("Failed to increase voltage (%d)\n", err));
+ return err;
+ }
+ }
+#endif
+
+ err = clk_set_rate(mdev->clock, freq);
+ if (err) {
+ MALI_PRINT_ERROR(("Failed to set clock %lu (target %lu)\n", freq, *target_freq));
+ return err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (mdev->regulator && mdev->current_voltage != voltage
+ && mdev->current_freq > freq) {
+ err = regulator_set_voltage(mdev->regulator, voltage, voltage);
+ if (err) {
+ MALI_PRINT_ERROR(("Failed to decrease voltage (%d)\n", err));
+ return err;
+ }
+ }
+#endif
+
+ *target_freq = freq;
+ mdev->current_voltage = voltage;
+ mdev->current_freq = freq;
+
+ mali_pm_reset_dvfs_utilisation(mdev);
+
+ return err;
+}
+
+static int
+mali_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct mali_device *mdev = dev_get_drvdata(dev);
+
+ *freq = mdev->current_freq;
+
+ MALI_DEBUG_PRINT(2, ("mali_devfreq_cur_freq: freq = %d \n", *freq));
+ return 0;
+}
+
+static int
+mali_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+ struct mali_device *mdev = dev_get_drvdata(dev);
+
+ stat->current_frequency = mdev->current_freq;
+
+ mali_pm_get_dvfs_utilisation(mdev,
+ &stat->total_time, &stat->busy_time);
+
+ stat->private_data = NULL;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ memcpy(&mdev->devfreq->last_status, stat, sizeof(*stat));
+#endif
+
+ return 0;
+}
+
+/* Set up platform-specific OPPs in platform.c */
+int __weak setup_opps(void)
+{
+ return 0;
+}
+
+/* Tear down platform-specific OPPs in platform.c */
+int __weak term_opps(struct device *dev)
+{
+ return 0;
+}
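+/* Illustrative sketch (hypothetical platform code, not part of this file): a
+ * board's platform.c may override the __weak hooks above to register its own
+ * operating points with the generic OPP helpers, for example:
+ *
+ *     int setup_opps(void)
+ *     {
+ *             struct device *dev = mali_platform_device_get_dev();  // assumed helper
+ *             dev_pm_opp_add(dev, 400000000, 950000);   // 400 MHz @ 0.95 V (example values)
+ *             dev_pm_opp_add(dev, 533000000, 1050000);  // 533 MHz @ 1.05 V (example values)
+ *             return 0;
+ *     }
+ */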
+
+static int mali_devfreq_init_freq_table(struct mali_device *mdev,
+ struct devfreq_dev_profile *dp)
+{
+ int err, count;
+ int i = 0;
+ unsigned long freq = 0;
+ struct dev_pm_opp *opp;
+
+ err = setup_opps();
+ if (err)
+ return err;
+
+ rcu_read_lock();
+ count = dev_pm_opp_get_opp_count(mdev->dev);
+ if (count < 0) {
+ rcu_read_unlock();
+ return count;
+ }
+ rcu_read_unlock();
+
+ MALI_DEBUG_PRINT(2, ("mali devfreq table count %d\n", count));
+
+ dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+ GFP_KERNEL);
+ if (!dp->freq_table)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++, freq++) {
+ opp = dev_pm_opp_find_freq_ceil(mdev->dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dp->freq_table[i] = freq;
+ MALI_DEBUG_PRINT(2, ("mali devfreq table array[%d] = %d\n", i, freq));
+ }
+ rcu_read_unlock();
+
+ if (count != i)
+ MALI_PRINT_ERROR(("Unable to enumerate all OPPs (%d!=%d)\n",
+ count, i));
+
+ dp->max_state = i;
+
+ return 0;
+}
+
+static void mali_devfreq_term_freq_table(struct mali_device *mdev)
+{
+ struct devfreq_dev_profile *dp = mdev->devfreq->profile;
+
+ kfree(dp->freq_table);
+ term_opps(mdev->dev);
+}
+
+static void mali_devfreq_exit(struct device *dev)
+{
+ struct mali_device *mdev = dev_get_drvdata(dev);
+
+ mali_devfreq_term_freq_table(mdev);
+}
+
+int mali_devfreq_init(struct mali_device *mdev)
+{
+#ifdef CONFIG_DEVFREQ_THERMAL
+ struct devfreq_cooling_power *callbacks = NULL;
+ _mali_osk_device_data data;
+#endif
+ struct devfreq_dev_profile *dp;
+ int err;
+
+ MALI_DEBUG_PRINT(2, ("Init Mali devfreq\n"));
+
+ if (!mdev->clock)
+ return -ENODEV;
+
+ mdev->current_freq = clk_get_rate(mdev->clock);
+
+ dp = &mdev->devfreq_profile;
+
+ dp->initial_freq = mdev->current_freq;
+ dp->polling_ms = 100;
+ dp->target = mali_devfreq_target;
+ dp->get_dev_status = mali_devfreq_status;
+ dp->get_cur_freq = mali_devfreq_cur_freq;
+ dp->exit = mali_devfreq_exit;
+
+ if (mali_devfreq_init_freq_table(mdev, dp))
+ return -EFAULT;
+
+ mdev->devfreq = devfreq_add_device(mdev->dev, dp,
+ "simple_ondemand", NULL);
+ if (IS_ERR(mdev->devfreq)) {
+ mali_devfreq_term_freq_table(mdev);
+ return PTR_ERR(mdev->devfreq);
+ }
+
+ err = devfreq_register_opp_notifier(mdev->dev, mdev->devfreq);
+ if (err) {
+ MALI_PRINT_ERROR(("Failed to register OPP notifier (%d)\n", err));
+ goto opp_notifier_failed;
+ }
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ /* Initialize last_status; it is used when the power allocator is first called. */
+ mdev->devfreq->last_status.current_frequency = mdev->current_freq;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if (NULL != data.gpu_cooling_ops) {
+ callbacks = data.gpu_cooling_ops;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Thermal: Callback handler installed \n"));
+ }
+ }
+
+ if (callbacks) {
+ mdev->devfreq_cooling = of_devfreq_cooling_register_power(
+ mdev->dev->of_node,
+ mdev->devfreq,
+ callbacks);
+ if (IS_ERR_OR_NULL(mdev->devfreq_cooling)) {
+ err = PTR_ERR(mdev->devfreq_cooling);
+ MALI_PRINT_ERROR(("Failed to register cooling device (%d)\n", err));
+ goto cooling_failed;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Thermal Cooling installed \n"));
+ }
+ }
+#endif
+
+ return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+ devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+ err = devfreq_remove_device(mdev->devfreq);
+ if (err)
+ MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err));
+ else
+ mdev->devfreq = NULL;
+
+ return err;
+}
+
+void mali_devfreq_term(struct mali_device *mdev)
+{
+ int err;
+
+ MALI_DEBUG_PRINT(2, ("Term Mali devfreq\n"));
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ devfreq_cooling_unregister(mdev->devfreq_cooling);
+#endif
+
+ devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq);
+
+ err = devfreq_remove_device(mdev->devfreq);
+ if (err)
+ MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err));
+ else
+ mdev->devfreq = NULL;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_devfreq.h b/drivers/gpu/arm/utgard/linux/mali_devfreq.h
new file mode 100644
index 000000000000..ba7c017d88dc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_devfreq.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _MALI_DEVFREQ_H_
+#define _MALI_DEVFREQ_H_
+
+struct mali_device;
+
+int mali_devfreq_init(struct mali_device *mdev);
+
+void mali_devfreq_term(struct mali_device *mdev);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c b/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c
new file mode 100644
index 000000000000..95c3ea12d645
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+
+void mali_dev_pause(void)
+{
+ /*
+ * Deactivate all groups to prevent the hardware from being
+ * touched while the Mali device is paused.
+ */
+ mali_pm_os_suspend(MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+void mali_dev_resume(void)
+{
+ mali_pm_os_resume();
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
diff --git a/drivers/gpu/arm/utgard/linux/mali_dma_fence.c b/drivers/gpu/arm/utgard/linux/mali_dma_fence.c
new file mode 100644
index 000000000000..2084af2e1e9a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_dma_fence.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include "mali_dma_fence.h"
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#endif
+
+static DEFINE_SPINLOCK(mali_dma_fence_lock);
+
+static bool mali_dma_fence_enable_signaling(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return true;
+}
+
+static const char *mali_dma_fence_get_driver_name(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali";
+}
+
+static const char *mali_dma_fence_get_timeline_name(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali_dma_fence";
+}
+
+static const struct fence_ops mali_dma_fence_ops = {
+ .get_driver_name = mali_dma_fence_get_driver_name,
+ .get_timeline_name = mali_dma_fence_get_timeline_name,
+ .enable_signaling = mali_dma_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = NULL
+};
+
+static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) {
+ if (dma_fence_context->mali_dma_fence_waiters[i]) {
+ fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
+ &dma_fence_context->mali_dma_fence_waiters[i]->base);
+ fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+ kfree(dma_fence_context->mali_dma_fence_waiters[i]);
+ dma_fence_context->mali_dma_fence_waiters[i] = NULL;
+ }
+ }
+
+ if (NULL != dma_fence_context->mali_dma_fence_waiters)
+ kfree(dma_fence_context->mali_dma_fence_waiters);
+
+ dma_fence_context->mali_dma_fence_waiters = NULL;
+ dma_fence_context->num_dma_fence_waiter = 0;
+}
+
+static void mali_dma_fence_context_work_func(struct work_struct *work_handle)
+{
+ struct mali_dma_fence_context *dma_fence_context;
+
+ MALI_DEBUG_ASSERT_POINTER(work_handle);
+
+ dma_fence_context = container_of(work_handle, struct mali_dma_fence_context, work_handle);
+
+ dma_fence_context->cb_func(dma_fence_context->pp_job_ptr);
+}
+
+static void mali_dma_fence_callback(struct fence *fence, struct fence_cb *cb)
+{
+ struct mali_dma_fence_waiter *dma_fence_waiter = NULL;
+ struct mali_dma_fence_context *dma_fence_context = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(cb);
+
+ MALI_IGNORE(fence);
+
+ dma_fence_waiter = container_of(cb, struct mali_dma_fence_waiter, base);
+ dma_fence_context = dma_fence_waiter->parent;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ if (atomic_dec_and_test(&dma_fence_context->count))
+ schedule_work(&dma_fence_context->work_handle);
+}
+
+static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct fence *fence)
+{
+ int ret = 0;
+ struct mali_dma_fence_waiter *dma_fence_waiter;
+ struct mali_dma_fence_waiter **dma_fence_waiters;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ dma_fence_waiters = krealloc(dma_fence_context->mali_dma_fence_waiters,
+ (dma_fence_context->num_dma_fence_waiter + 1)
+ * sizeof(struct mali_dma_fence_waiter *),
+ GFP_KERNEL);
+
+ if (NULL == dma_fence_waiters) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to realloc the dma fence waiters.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ dma_fence_context->mali_dma_fence_waiters = dma_fence_waiters;
+
+ dma_fence_waiter = kzalloc(sizeof(struct mali_dma_fence_waiter), GFP_KERNEL);
+
+ if (NULL == dma_fence_waiter) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create mali dma fence waiter.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ fence_get(fence);
+
+ dma_fence_waiter->fence = fence;
+ dma_fence_waiter->parent = dma_fence_context;
+ atomic_inc(&dma_fence_context->count);
+
+ ret = fence_add_callback(fence, &dma_fence_waiter->base,
+ mali_dma_fence_callback);
+ if (0 > ret) {
+ fence_put(fence);
+ kfree(dma_fence_waiter);
+ atomic_dec(&dma_fence_context->count);
+ if (-ENOENT == ret) {
+ /*-ENOENT if fence has already been signaled, return _MALI_OSK_ERR_OK*/
+ return _MALI_OSK_ERR_OK;
+ }
+ /* Failed to add the fence callback into fence, return _MALI_OSK_ERR_FAULT*/
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into fence.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ dma_fence_context->mali_dma_fence_waiters[dma_fence_context->num_dma_fence_waiter] = dma_fence_waiter;
+ dma_fence_context->num_dma_fence_waiter++;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+struct fence *mali_dma_fence_new(u32 context, u32 seqno)
+{
+ struct fence *fence = NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (NULL == fence) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n"));
+ return fence;
+ }
+
+ fence_init(fence,
+ &mali_dma_fence_ops,
+ &mali_dma_fence_lock,
+ context, seqno);
+
+ return fence;
+}
+
+void mali_dma_fence_signal_and_put(struct fence **fence)
+{
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(*fence);
+
+ fence_signal(*fence);
+ fence_put(*fence);
+ *fence = NULL;
+}
+
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+ mali_dma_fence_context_callback_func_t cb_func,
+ void *pp_job_ptr)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ INIT_WORK(&dma_fence_context->work_handle, mali_dma_fence_context_work_func);
+ atomic_set(&dma_fence_context->count, 1);
+ dma_fence_context->num_dma_fence_waiter = 0;
+ dma_fence_context->mali_dma_fence_waiters = NULL;
+ dma_fence_context->cb_func = cb_func;
+ dma_fence_context->pp_job_ptr = pp_job_ptr;
+}
+
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+ struct reservation_object *dma_reservation_object)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+ struct fence *exclusive_fence = NULL;
+ u32 shared_count = 0, i;
+ struct fence **shared_fences = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+
+ /* Get all the shared/exclusive fences in the reservation object of dma buf*/
+ ret = reservation_object_get_fences_rcu(dma_reservation_object, &exclusive_fence,
+ &shared_count, &shared_fences);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to get shared or exclusive_fence dma fences from the reservation object of dma buf.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (exclusive_fence) {
+ ret = mali_dma_fence_add_callback(dma_fence_context, exclusive_fence);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into exclusive fence.\n"));
+ mali_dma_fence_context_cleanup(dma_fence_context);
+ goto ended;
+ }
+ }
+
+
+ for (i = 0; i < shared_count; i++) {
+ ret = mali_dma_fence_add_callback(dma_fence_context, shared_fences[i]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into shared fence [%d].\n", i));
+ mali_dma_fence_context_cleanup(dma_fence_context);
+ break;
+ }
+ }
+
+ended:
+
+ if (exclusive_fence)
+ fence_put(exclusive_fence);
+
+ if (shared_fences) {
+ for (i = 0; i < shared_count; i++) {
+ fence_put(shared_fences[i]);
+ }
+ kfree(shared_fences);
+ }
+
+ return ret;
+}
+
+
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ atomic_set(&dma_fence_context->count, 0);
+ if (dma_fence_context->work_handle.func) {
+ cancel_work_sync(&dma_fence_context->work_handle);
+ }
+ mali_dma_fence_context_cleanup(dma_fence_context);
+}
+
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ if (atomic_dec_and_test(&dma_fence_context->count))
+ schedule_work(&dma_fence_context->work_handle);
+}
+
+
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+ struct reservation_object **dma_reservation_object_list,
+ u32 *num_dma_reservation_object)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+ MALI_DEBUG_ASSERT_POINTER(num_dma_reservation_object);
+
+ for (i = 0; i < *num_dma_reservation_object; i++) {
+ if (dma_reservation_object_list[i] == dma_reservation_object)
+ return;
+ }
+
+ dma_reservation_object_list[*num_dma_reservation_object] = dma_reservation_object;
+ (*num_dma_reservation_object)++;
+}
+
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+ u32 i;
+
+ struct reservation_object *reservation_object_to_slow_lock = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+ MALI_DEBUG_ASSERT_POINTER(ww_actx);
+
+ ww_acquire_init(ww_actx, &reservation_ww_class);
+
+again:
+ for (i = 0; i < num_dma_reservation_object; i++) {
+ int ret;
+
+ if (dma_reservation_object_list[i] == reservation_object_to_slow_lock) {
+ reservation_object_to_slow_lock = NULL;
+ continue;
+ }
+
+ ret = ww_mutex_lock(&dma_reservation_object_list[i]->lock, ww_actx);
+
+ if (ret < 0) {
+ u32 slow_lock_index = i;
+
+ /* unlock all pre locks we have already locked.*/
+ while (i > 0) {
+ i--;
+ ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
+ }
+
+ if (NULL != reservation_object_to_slow_lock)
+ ww_mutex_unlock(&reservation_object_to_slow_lock->lock);
+
+ if (ret == -EDEADLK) {
+ reservation_object_to_slow_lock = dma_reservation_object_list[slow_lock_index];
+ ww_mutex_lock_slow(&reservation_object_to_slow_lock->lock, ww_actx);
+ goto again;
+ }
+ ww_acquire_fini(ww_actx);
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to lock all dma reservation objects.\n", i));
+ return ret;
+ }
+ }
+
+ ww_acquire_done(ww_actx);
+ return 0;
+}
+
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+ u32 i;
+
+ for (i = 0; i < num_dma_reservation_object; i++)
+ ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
+
+ ww_acquire_fini(ww_actx);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_dma_fence.h b/drivers/gpu/arm/utgard/linux/mali_dma_fence.h
new file mode 100644
index 000000000000..cefce98a5343
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_dma_fence.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_dma_fence.h
+ *
+ * Mali interface for Linux dma buf fence objects.
+ */
+
+#ifndef _MALI_DMA_FENCE_H_
+#define _MALI_DMA_FENCE_H_
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/fence.h>
+#include <linux/reservation.h>
+#endif
+
+struct mali_dma_fence_context;
+
+/* The mali dma fence context callback function */
+typedef void (*mali_dma_fence_context_callback_func_t)(void *pp_job_ptr);
+
+struct mali_dma_fence_waiter {
+ struct fence_cb base;
+ struct mali_dma_fence_context *parent;
+ struct fence *fence;
+};
+
+struct mali_dma_fence_context {
+ struct work_struct work_handle;
+ struct mali_dma_fence_waiter **mali_dma_fence_waiters;
+ u32 num_dma_fence_waiter;
+ atomic_t count;
+ void *pp_job_ptr; /* the mali pp job pointer */
+ mali_dma_fence_context_callback_func_t cb_func;
+};
+
+/* Create a dma fence
+ * @param context The execution context this fence is run on
+ * @param seqno A linearly increasing sequence number for this context
+ * @return the new dma fence if success, or NULL on failure.
+ */
+struct fence *mali_dma_fence_new(u32 context, u32 seqno);
+
+/* Signal and put dma fence
+ * @param fence The dma fence to signal and put
+ */
+void mali_dma_fence_signal_and_put(struct fence **fence);
+
+/**
+ * Initialize a mali dma fence context for pp job.
+ * @param dma_fence_context The mali dma fence context to initialize.
+ * @param cb_func The callback to invoke once all tracked dma fences have signalled.
+ * @param pp_job_ptr The pp job pointer passed to the callback.
+ */
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+ mali_dma_fence_context_callback_func_t cb_func,
+ void *pp_job_ptr);
+
+/**
+ * Add new mali dma fence waiter into mali dma fence context
+ * @param dma_fence_context The mali dma fence context
+ * @param dma_reservation_object The reservation object whose fences the new waiters will track
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code.
+ */
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+ struct reservation_object *dma_reservation_object);
+
+/**
+ * Release the dma fence context
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Decrease the dma fence context atomic count
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Add a reservation object to the reservation object list, skipping duplicates.
+ * @param dma_reservation_object The reservation object to add to the list
+ * @param dma_reservation_object_list The list that stores all reservation objects
+ * @param num_dma_reservation_object The current number of reservation objects in the list; updated on add
+ */
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+ struct reservation_object **dma_reservation_object_list,
+ u32 *num_dma_reservation_object);
+
+/**
+ * Lock all reservation objects using the wait/wound mutex protocol.
+ */
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
+
+/**
+ * Unlock all reservation objects and release the wait/wound acquire context.
+ */
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
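+
+/* Typical call sequence (illustrative sketch only; simplified from how the pp
+ * job path is expected to drive this interface):
+ *
+ *     mali_dma_fence_context_init(&ctx, pp_job_ready_cb, pp_job);   // names assumed
+ *     mali_dma_fence_context_add_waiters(&ctx, dma_buf->resv);
+ *     ...
+ *     mali_dma_fence_context_dec_count(&ctx);   // drop the initial count; the
+ *                                               // callback fires once all fences signal
+ *     ...
+ *     mali_dma_fence_context_term(&ctx);
+ */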
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_internal_sync.c b/drivers/gpu/arm/utgard/linux/mali_internal_sync.c
new file mode 100644
index 000000000000..aaae7841cc4a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_internal_sync.c
@@ -0,0 +1,813 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2012-2016 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include "mali_internal_sync.h"
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#include <linux/ioctl.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#if defined(DEBUG)
+#include "mali_session.h"
+#include "mali_timeline.h"
+#endif
+
+struct mali_internal_sync_merge_data {
+ s32 fd;
+ char name[32];
+ s32 fence;
+};
+
+struct mali_internal_sync_pt_info {
+ u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ int status;
+ u64 timestamp_ns;
+ u8 driver_data[0];
+};
+
+struct mali_internal_sync_info_data {
+ u32 len;
+ char name[32];
+ int status;
+ u8 sync_pt_info[0];
+};
+
+/**
+ * Define the ioctl constant for sync fence wait.
+ */
+#define MALI_INTERNAL_SYNC_IOC_WAIT _IOW('>', 0, s32)
+
+/**
+ * Define the ioctl constant for sync fence merge.
+ */
+#define MALI_INTERNAL_SYNC_IOC_MERGE _IOWR('>', 1, struct mali_internal_sync_merge_data)
+
+/**
+ * Define the ioctl constant for sync fence info.
+ */
+#define MALI_INTERNAL_SYNC_IOC_FENCE_INFO _IOWR('>', 2, struct mali_internal_sync_info_data)
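+
+/* Illustrative user-space sketch (not part of this file): a process holding a
+ * fence fd exported by this driver could wait on it with, for example,
+ *
+ *     s32 timeout_ms = 100;                         // a negative value waits forever
+ *     ioctl(fence_fd, MALI_INTERNAL_SYNC_IOC_WAIT, &timeout_ms);
+ */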
+
+static const struct fence_ops fence_ops;
+static const struct file_operations sync_fence_fops;
+
+static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct fence *fence)
+{
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ return container_of(fence, struct mali_internal_sync_point, base);
+}
+
+static inline struct mali_internal_sync_timeline *mali_internal_sync_pt_to_sync_timeline(struct mali_internal_sync_point *sync_pt)
+{
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+ return container_of(sync_pt->base.lock, struct mali_internal_sync_timeline, sync_pt_list_lock);
+}
+
+static void mali_internal_sync_timeline_free(struct kref *kref_count)
+{
+ struct mali_internal_sync_timeline *sync_timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(kref_count);
+
+ sync_timeline = container_of(kref_count, struct mali_internal_sync_timeline, kref_count);
+
+ if (sync_timeline->ops->release_obj)
+ sync_timeline->ops->release_obj(sync_timeline);
+
+ kfree(sync_timeline);
+}
+
+static struct mali_internal_sync_fence *mali_internal_sync_fence_alloc(int size)
+{
+ struct mali_internal_sync_fence *sync_fence = NULL;
+
+ sync_fence = kzalloc(size, GFP_KERNEL);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali internal sync: Failed to allocate buffer for the mali internal sync fence.\n"));
+ goto err;
+ }
+
+ sync_fence->file = anon_inode_getfile("mali_sync_fence", &sync_fence_fops, sync_fence, 0);
+ if (IS_ERR(sync_fence->file)) {
+ MALI_PRINT_ERROR(("Mali internal sync: Failed to get file for the mali internal sync fence: err %ld.\n", PTR_ERR(sync_fence->file)));
+ goto err;
+ }
+
+ kref_init(&sync_fence->kref_count);
+ init_waitqueue_head(&sync_fence->wq);
+
+ return sync_fence;
+
+err:
+ if (NULL != sync_fence) {
+ kfree(sync_fence);
+ }
+ return NULL;
+}
+
+static void mali_internal_fence_check_cb_func(struct fence *fence, struct fence_cb *cb)
+{
+ struct mali_internal_sync_fence_cb *check;
+ struct mali_internal_sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(cb);
+ MALI_IGNORE(fence);
+
+ check = container_of(cb, struct mali_internal_sync_fence_cb, cb);
+ sync_fence = check->sync_fence;
+
+ if (atomic_dec_and_test(&sync_fence->status))
+ wake_up_all(&sync_fence->wq);
+}
+
+static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *sync_fence, struct fence *sync_pt)
+{
+ int fence_num = 0;
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ fence_num = atomic_read(&sync_fence->num_fences);
+
+ sync_fence->cbs[fence_num].base = sync_pt;
+ sync_fence->cbs[fence_num].sync_fence = sync_fence;
+
+ if (!fence_add_callback(sync_pt, &sync_fence->cbs[fence_num].cb, mali_internal_fence_check_cb_func)) {
+ fence_get(sync_pt);
+ atomic_inc(&sync_fence->num_fences);
+ atomic_inc(&sync_fence->status);
+ }
+}
+
+static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+ int wake_flags, void *key)
+{
+ struct mali_internal_sync_fence_waiter *wait;
+ MALI_IGNORE(mode);
+ MALI_IGNORE(wake_flags);
+ MALI_IGNORE(key);
+
+ wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);
+ list_del_init(&wait->work.task_list);
+
+ wait->callback(wait->work.private, wait);
+ return 1;
+}
+
+struct mali_internal_sync_timeline *mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops *ops,
+ int size, const char *name)
+{
+ struct mali_internal_sync_timeline *sync_timeline = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(ops);
+
+ if (size < sizeof(struct mali_internal_sync_timeline)) {
+ MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync timeline.\n"));
+ goto err;
+ }
+
+ sync_timeline = kzalloc(size, GFP_KERNEL);
+ if (NULL == sync_timeline) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to allocate buffer for the mali internal sync timeline.\n"));
+ goto err;
+ }
+ kref_init(&sync_timeline->kref_count);
+ sync_timeline->ops = ops;
+ sync_timeline->fence_context = fence_context_alloc(1);
+ strlcpy(sync_timeline->name, name, sizeof(sync_timeline->name));
+
+ INIT_LIST_HEAD(&sync_timeline->sync_pt_list_head);
+ spin_lock_init(&sync_timeline->sync_pt_list_lock);
+
+ return sync_timeline;
+err:
+ if (NULL != sync_timeline) {
+ kfree(sync_timeline);
+ }
+ return NULL;
+}
+
+void mali_internal_sync_timeline_destroy(struct mali_internal_sync_timeline *sync_timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ sync_timeline->destroyed = MALI_TRUE;
+
+ smp_wmb();
+
+ mali_internal_sync_timeline_signal(sync_timeline);
+ kref_put(&sync_timeline->kref_count, mali_internal_sync_timeline_free);
+}
+
+void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync_timeline)
+{
+ unsigned long flags;
+ struct mali_internal_sync_point *sync_pt, *next;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
+
+ list_for_each_entry_safe(sync_pt, next, &sync_timeline->sync_pt_list_head,
+ sync_pt_list) {
+ if (fence_is_signaled_locked(&sync_pt->base))
+ list_del_init(&sync_pt->sync_pt_list);
+ }
+
+ spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
+}
+
+struct mali_internal_sync_point *mali_internal_sync_point_create(struct mali_internal_sync_timeline *sync_timeline, int size)
+{
+ unsigned long flags;
+ struct mali_internal_sync_point *sync_pt = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ if (size < sizeof(struct mali_internal_sync_point)) {
+ MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync point.\n"));
+ goto err;
+ }
+
+ sync_pt = kzalloc(size, GFP_KERNEL);
+ if (NULL == sync_pt) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to allocate buffer for the mali internal sync point.\n"));
+ goto err;
+ }
+ spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
+ kref_get(&sync_timeline->kref_count);
+ fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
+ sync_timeline->fence_context, ++sync_timeline->value);
+ INIT_LIST_HEAD(&sync_pt->sync_pt_list);
+ spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
+
+ return sync_pt;
+err:
+ if (NULL != sync_pt) {
+ kfree(sync_pt);
+ }
+ return NULL;
+}
+
+struct mali_internal_sync_fence *mali_internal_sync_fence_create(struct mali_internal_sync_point *sync_pt)
+{
+ struct mali_internal_sync_fence *sync_fence = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ sync_fence = mali_internal_sync_fence_alloc(offsetof(struct mali_internal_sync_fence, cbs[1]));
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to create the mali internal sync fence.\n"));
+ return NULL;
+ }
+
+ atomic_set(&sync_fence->num_fences, 1);
+ atomic_set(&sync_fence->status, 1);
+
+ sync_fence->cbs[0].base = &sync_pt->base;
+ sync_fence->cbs[0].sync_fence = sync_fence;
+ if (fence_add_callback(&sync_pt->base, &sync_fence->cbs[0].cb,
+ mali_internal_fence_check_cb_func))
+ atomic_dec(&sync_fence->status);
+
+ return sync_fence;
+}
+
+struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (NULL == file) {
+ return NULL;
+ }
+
+ return file->private_data;
+}
+
+struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
+ struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
+{
+ struct mali_internal_sync_fence *new_sync_fence;
+ int i, j, num_fence1, num_fence2, total_fences;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ num_fence1 = atomic_read(&sync_fence1->num_fences);
+ num_fence2 = atomic_read(&sync_fence2->num_fences);
+
+ total_fences = num_fence1 + num_fence2;
+
+ new_sync_fence = mali_internal_sync_fence_alloc(offsetof(struct mali_internal_sync_fence, cbs[total_fences]));
+ if (NULL == new_sync_fence) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to create the mali internal sync fence when merging sync fence.\n"));
+ return NULL;
+ }
+
+ for (i = j = 0; i < num_fence1 && j < num_fence2; ) {
+ struct fence *fence1 = sync_fence1->cbs[i].base;
+ struct fence *fence2 = sync_fence2->cbs[j].base;
+
+ if (fence1->context < fence2->context) {
+ mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
+
+ i++;
+ } else if (fence1->context > fence2->context) {
+ mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
+
+ j++;
+ } else {
+ if (fence1->seqno - fence2->seqno <= INT_MAX)
+ mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
+ else
+ mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
+ i++;
+ j++;
+ }
+ }
+
+ for (; i < num_fence1; i++)
+ mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence1->cbs[i].base);
+
+ for (; j < num_fence2; j++)
+ mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence2->cbs[j].base);
+
+ return new_sync_fence;
+}
+
+void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter *waiter,
+ mali_internal_sync_callback_t callback)
+{
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ INIT_LIST_HEAD(&waiter->work.task_list);
+ waiter->callback = callback;
+}
+
+int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fence,
+ struct mali_internal_sync_fence_waiter *waiter)
+{
+ int err;
+ unsigned long flags;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ err = atomic_read(&sync_fence->status);
+
+ if (0 > err)
+ return err;
+
+ if (!err)
+ return 1;
+
+ init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
+ waiter->work.private = sync_fence;
+
+ spin_lock_irqsave(&sync_fence->wq.lock, flags);
+ err = atomic_read(&sync_fence->status);
+ if (0 < err)
+ __add_wait_queue_tail(&sync_fence->wq, &waiter->work);
+ spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
+
+ if (0 > err)
+ return err;
+
+ return !err;
+}
+
+int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_fence,
+ struct mali_internal_sync_fence_waiter *waiter)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ spin_lock_irqsave(&sync_fence->wq.lock, flags);
+ if (!list_empty(&waiter->work.task_list))
+ list_del_init(&waiter->work.task_list);
+ else
+ ret = -ENOENT;
+ spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
+
+ return ret;
+}
+
+#if defined(DEBUG)
+static void mali_internal_sync_timeline_show(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 session_seq = 1;
+ MALI_DEBUG_PRINT(2, ("timeline system info: \n=================\n\n"));
+
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_PRINT(2, ("session %d <%p> start:\n", session_seq, session));
+ mali_timeline_debug_print_system(session->timeline_system, NULL);
+ MALI_DEBUG_PRINT(2, ("session %d end\n\n\n", session_seq++));
+ }
+ mali_session_unlock();
+}
+#endif
+static int mali_internal_sync_fence_wait(struct mali_internal_sync_fence *sync_fence, long timeout)
+{
+ long ret;
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+
+ if (0 > timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(sync_fence->wq,
+ atomic_read(&sync_fence->status) <= 0, timeout);
+
+ if (0 > ret) {
+ return ret;
+ } else if (ret == 0) {
+ if (timeout) {
+ int i;
+ MALI_DEBUG_PRINT(2, ("Mali internal sync:fence timeout on [%p] after %dms\n",
+ sync_fence, jiffies_to_msecs(timeout)));
+
+ for (i = 0; i < atomic_read(&sync_fence->num_fences); ++i) {
+ sync_fence->cbs[i].base->ops->fence_value_str(sync_fence->cbs[i].base, NULL, 0);
+ }
+
+#if defined(DEBUG)
+ mali_internal_sync_timeline_show();
+#endif
+
+ }
+ return -ETIME;
+ }
+
+ ret = atomic_read(&sync_fence->status);
+ if (ret) {
+ int i;
+ MALI_DEBUG_PRINT(2, ("fence error %ld on [%p]\n", ret, sync_fence));
+ for (i = 0; i < atomic_read(&sync_fence->num_fences); ++i) {
+ sync_fence->cbs[i].base->ops->fence_value_str(sync_fence->cbs[i].base, NULL, 0);
+ }
+#if defined(DEBUG)
+ mali_internal_sync_timeline_show();
+#endif
+ }
+ return ret;
+}
+
+static const char *mali_internal_fence_get_driver_name(struct fence *fence)
+{
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+ return parent->ops->driver_name;
+}
+
+static const char *mali_internal_fence_get_timeline_name(struct fence *fence)
+{
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+ return parent->name;
+}
+
+static void mali_internal_fence_release(struct fence *fence)
+{
+ unsigned long flags;
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (WARN_ON_ONCE(!list_empty(&sync_pt->sync_pt_list)))
+ list_del(&sync_pt->sync_pt_list);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ if (parent->ops->free_pt)
+ parent->ops->free_pt(sync_pt);
+
+ kref_put(&parent->kref_count, mali_internal_sync_timeline_free);
+ fence_free(&sync_pt->base);
+}
+
+static bool mali_internal_fence_signaled(struct fence *fence)
+{
+ int ret;
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+ ret = parent->ops->has_signaled(sync_pt);
+ if (0 > ret)
+ fence->error = ret;
+ return ret;
+}
+
+static bool mali_internal_fence_enable_signaling(struct fence *fence)
+{
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+ if (mali_internal_fence_signaled(fence))
+ return false;
+
+ list_add_tail(&sync_pt->sync_pt_list, &parent->sync_pt_list_head);
+ return true;
+}
+
+static void mali_internal_fence_value_str(struct fence *fence,
+ char *str, int size)
+{
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_timeline *parent;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_IGNORE(str);
+ MALI_IGNORE(size);
+
+ sync_pt = mali_internal_fence_to_sync_pt(fence);
+ parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+ parent->ops->print_sync_pt(sync_pt);
+}
+
+static const struct fence_ops fence_ops = {
+ .get_driver_name = mali_internal_fence_get_driver_name,
+ .get_timeline_name = mali_internal_fence_get_timeline_name,
+ .enable_signaling = mali_internal_fence_enable_signaling,
+ .signaled = mali_internal_fence_signaled,
+ .wait = fence_default_wait,
+ .release = mali_internal_fence_release,
+ .fence_value_str = mali_internal_fence_value_str,
+};
+
+static void mali_internal_sync_fence_free(struct kref *kref_count)
+{
+ struct mali_internal_sync_fence *sync_fence;
+ int i, num_fences;
+
+ MALI_DEBUG_ASSERT_POINTER(kref_count);
+
+ sync_fence = container_of(kref_count, struct mali_internal_sync_fence, kref_count);
+ num_fences = atomic_read(&sync_fence->num_fences);
+
+ for (i = 0; i < num_fences; ++i) {
+ fence_remove_callback(sync_fence->cbs[i].base, &sync_fence->cbs[i].cb);
+ fence_put(sync_fence->cbs[i].base);
+ }
+
+ kfree(sync_fence);
+}
+
+static int mali_internal_sync_fence_release(struct inode *inode, struct file *file)
+{
+ struct mali_internal_sync_fence *sync_fence;
+ MALI_IGNORE(inode);
+ MALI_DEBUG_ASSERT_POINTER(file);
+ sync_fence = file->private_data;
+ kref_put(&sync_fence->kref_count, mali_internal_sync_fence_free);
+ return 0;
+}
+
+static unsigned int mali_internal_sync_fence_poll(struct file *file, poll_table *wait)
+{
+ int status;
+ struct mali_internal_sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(file);
+ MALI_DEBUG_ASSERT_POINTER(wait);
+
+ sync_fence = file->private_data;
+ poll_wait(file, &sync_fence->wq, wait);
+ status = atomic_read(&sync_fence->status);
+
+ if (!status)
+ return POLLIN;
+ else if (status < 0)
+ return POLLERR;
+ return 0;
+}
+
+static long mali_internal_sync_fence_ioctl_wait(struct mali_internal_sync_fence *sync_fence, unsigned long arg)
+{
+ s32 value;
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value))) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to copy from user when sync fence ioctl wait.\n"));
+ return -EFAULT;
+ }
+ return mali_internal_sync_fence_wait(sync_fence, value);
+}
+
+static long mali_internal_sync_fence_ioctl_merge(struct mali_internal_sync_fence *old_sync_fence1, unsigned long arg)
+{
+ int err;
+ struct mali_internal_sync_fence *old_sync_fence2, *new_sync_fence;
+ struct mali_internal_sync_merge_data data;
+ int fd;
+
+ MALI_DEBUG_ASSERT_POINTER(old_sync_fence1);
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+
+ if (0 > fd) {
+ MALI_PRINT_ERROR(("Mali internal sync:Invalid fd when sync fence ioctl merge.\n"));
+ return fd;
+ }
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to copy from user when sync fence ioctl merge.\n"));
+ err = -EFAULT;
+ goto copy_from_user_failed;
+ }
+
+ old_sync_fence2 = mali_internal_sync_fence_fdget(data.fd);
+ if (NULL == old_sync_fence2) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to sync fence fdget when sync fence ioctl merge.\n"));
+ err = -ENOENT;
+ goto sync_fence_fdget_failed;
+ }
+
+ new_sync_fence = mali_internal_sync_fence_merge(old_sync_fence1, old_sync_fence2);
+ if (NULL == new_sync_fence) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to sync fence merge when sync fence ioctl merge.\n"));
+ err = -ENOMEM;
+ goto sync_fence_merge_failed;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to copy to user when sync fence ioctl merge.\n"));
+ err = -EFAULT;
+ goto copy_to_user_failed;
+ }
+
+ fd_install(fd, new_sync_fence->file);
+ fput(old_sync_fence2->file);
+ return 0;
+
+copy_to_user_failed:
+ fput(new_sync_fence->file);
+sync_fence_merge_failed:
+ fput(old_sync_fence2->file);
+sync_fence_fdget_failed:
+copy_from_user_failed:
+ put_unused_fd(fd);
+ return err;
+}
+
+static long mali_internal_sync_fence_ioctl_fence_info(struct mali_internal_sync_fence *sync_fence, unsigned long arg)
+{
+ struct mali_internal_sync_info_data *sync_info_data;
+ u32 size;
+ char name[32] = "mali_internal_fence";
+ u32 len = sizeof(struct mali_internal_sync_info_data);
+ int num_fences, err, i;
+
+ if (copy_from_user(&size, (void __user *)arg, sizeof(size))) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to copy from user when sync fence ioctl fence data info.\n"));
+ err = -EFAULT;
+ goto copy_from_user_failed;
+ }
+
+ if (size < sizeof(struct mali_internal_sync_info_data)) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to data size check when sync fence ioctl fence data info.\n"));
+ err = -EINVAL;
+ goto data_size_check_failed;
+ }
+
+ if (size > 4096)
+ size = 4096;
+
+ sync_info_data = kzalloc(size, GFP_KERNEL);
+ if (sync_info_data == NULL) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to allocate buffer when sync fence ioctl fence data info.\n"));
+ err = -ENOMEM;
+ goto allocate_buffer_failed;
+ }
+
+ strlcpy(sync_info_data->name, name, sizeof(sync_info_data->name));
+
+ sync_info_data->status = atomic_read(&sync_fence->status);
+ if (sync_info_data->status >= 0)
+ sync_info_data->status = !sync_info_data->status;
+
+ num_fences = atomic_read(&sync_fence->num_fences);
+
+ for (i = 0; i < num_fences; ++i) {
+ struct mali_internal_sync_pt_info *sync_pt_info = NULL;
+ struct fence *base = sync_fence->cbs[i].base;
+
+ if ((size - len) < sizeof(struct mali_internal_sync_pt_info)) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to fence size check when sync fence ioctl fence data info.\n"));
+ err = -ENOMEM;
+ goto fence_size_check_failed;
+
+ }
+
+ sync_pt_info = (struct mali_internal_sync_pt_info *)((u8 *)sync_info_data + len);
+ sync_pt_info->len = sizeof(struct mali_internal_sync_pt_info);
+
+ strlcpy(sync_pt_info->obj_name, base->ops->get_timeline_name(base), sizeof(sync_pt_info->obj_name));
+ strlcpy(sync_pt_info->driver_name, base->ops->get_driver_name(base), sizeof(sync_pt_info->driver_name));
+
+ if (fence_is_signaled(base))
+ sync_pt_info->status = base->error >= 0 ? 1 : base->error;
+ else
+ sync_pt_info->status = 0;
+
+ sync_pt_info->timestamp_ns = ktime_to_ns(base->timestamp);
+
+ len += sync_pt_info->len;
+ }
+
+ sync_info_data->len = len;
+
+ if (copy_to_user((void __user *)arg, sync_info_data, len)) {
+ MALI_PRINT_ERROR(("Mali internal sync:Failed to copy to user when sync fence ioctl fence data info.\n"));
+ err = -EFAULT;
+ goto copy_to_user_failed;
+ }
+
+ err = 0;
+
+copy_to_user_failed:
+fence_size_check_failed:
+ kfree(sync_info_data);
+allocate_buffer_failed:
+data_size_check_failed:
+copy_from_user_failed:
+ return err;
+}
+
+static long mali_internal_sync_fence_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mali_internal_sync_fence *sync_fence = file->private_data;
+
+ switch (cmd) {
+ case MALI_INTERNAL_SYNC_IOC_WAIT:
+ return mali_internal_sync_fence_ioctl_wait(sync_fence, arg);
+
+ case MALI_INTERNAL_SYNC_IOC_MERGE:
+ return mali_internal_sync_fence_ioctl_merge(sync_fence, arg);
+
+ case MALI_INTERNAL_SYNC_IOC_FENCE_INFO:
+ return mali_internal_sync_fence_ioctl_fence_info(sync_fence, arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sync_fence_fops = {
+ .release = mali_internal_sync_fence_release,
+ .poll = mali_internal_sync_fence_poll,
+ .unlocked_ioctl = mali_internal_sync_fence_ioctl,
+ .compat_ioctl = mali_internal_sync_fence_ioctl,
+};
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_internal_sync.h b/drivers/gpu/arm/utgard/linux/mali_internal_sync.h
new file mode 100644
index 000000000000..37673ae47d4f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_internal_sync.h
@@ -0,0 +1,144 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+/**
+ * @file mali_internal_sync.h
+ *
+ * Mali internal structure/interface for sync.
+ */
+
+#ifndef _MALI_INTERNAL_SYNC_H
+#define _MALI_INTERNAL_SYNC_H
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fence.h>
+
+struct mali_internal_sync_timeline;
+struct mali_internal_sync_point;
+struct mali_internal_sync_fence;
+
+struct mali_internal_sync_timeline_ops {
+ const char *driver_name;
+ int (*has_signaled)(struct mali_internal_sync_point *pt);
+ void (*free_pt)(struct mali_internal_sync_point *sync_pt);
+ void (*release_obj)(struct mali_internal_sync_timeline *sync_timeline);
+ void (*print_sync_pt)(struct mali_internal_sync_point *sync_pt);
+};
+
+struct mali_internal_sync_timeline {
+ struct kref kref_count;
+ const struct mali_internal_sync_timeline_ops *ops;
+ char name[32];
+ bool destroyed;
+ int fence_context;
+ int value;
+ spinlock_t sync_pt_list_lock;
+ struct list_head sync_pt_list_head;
+};
+
+struct mali_internal_sync_point {
+ struct fence base;
+ struct list_head sync_pt_list;
+};
+
+struct mali_internal_sync_fence_cb {
+ struct fence_cb cb;
+ struct fence *base;
+ struct mali_internal_sync_fence *sync_fence;
+};
+
+struct mali_internal_sync_fence {
+ struct file *file;
+ struct kref kref_count;
+ atomic_t num_fences;
+ wait_queue_head_t wq;
+ atomic_t status;
+ struct mali_internal_sync_fence_cb cbs[];
+};
+
+struct mali_internal_sync_fence_waiter;
+
+typedef void (*mali_internal_sync_callback_t)(struct mali_internal_sync_fence *sync_fence,
+ struct mali_internal_sync_fence_waiter *waiter);
+
+struct mali_internal_sync_fence_waiter {
+ wait_queue_t work;
+ mali_internal_sync_callback_t callback;
+};
+
+/**
+ * Create a mali internal sync timeline.
+ * @param ops The implementation ops for the mali internal sync timeline
+ * @param size The size to allocate
+ * @param name The sync_timeline name
+ * @return The new mali internal sync timeline if successful, NULL if not.
+ */
+struct mali_internal_sync_timeline *mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops *ops,
+ int size, const char *name);
+
+/**
+ * Destroy one mali internal sync timeline.
+ * @param sync_timeline The mali internal sync timeline to destroy.
+ */
+void mali_internal_sync_timeline_destroy(struct mali_internal_sync_timeline *sync_timeline);
+
+/**
+ * Signal one mali internal sync timeline.
+ * @param sync_timeline The mali internal sync timeline to signal.
+ */
+void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync_timeline);
+
+/**
+ * Create one mali internal sync point.
+ * @param sync_timeline The mali internal sync timeline the new sync point is added to.
+ * @param size The size to allocate for the sync point.
+ * @return the new mali internal sync point if successful, NULL if not.
+ */
+struct mali_internal_sync_point *mali_internal_sync_point_create(struct mali_internal_sync_timeline *sync_timeline, int size);
+
+/**
+ * Create a mali internal sync fence
+ * @param sync_pt The mali internal sync point to add
+ * @return the mali internal sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_internal_sync_fence_create(struct mali_internal_sync_point *sync_pt);
+
+/**
+ * Merge mali internal sync fences
+ * @param sync_fence1 The mali internal sync fence to merge
+ * @param sync_fence2 The mali internal sync fence to merge
+ * @return the new mali internal sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_internal_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1,
+ struct mali_internal_sync_fence *sync_fence2);
+
+/**
+ * Get the mali internal sync fence from sync fd
+ * @param fd The sync handle to get the mali internal sync fence
+ * @return the mali internal sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd);
+
+
+void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter *waiter,
+ mali_internal_sync_callback_t callback);
+
+int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fence,
+ struct mali_internal_sync_fence_waiter *waiter);
+
+int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_fence,
+ struct mali_internal_sync_fence_waiter *waiter);
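+
+/* Typical usage (illustrative sketch only; placeholder names such as my_ops
+ * and "my_timeline" are assumptions):
+ *
+ *     struct mali_internal_sync_timeline *tl =
+ *             mali_internal_sync_timeline_create(&my_ops, sizeof(*tl), "my_timeline");
+ *     struct mali_internal_sync_point *pt =
+ *             mali_internal_sync_point_create(tl, sizeof(*pt));
+ *     struct mali_internal_sync_fence *f = mali_internal_sync_fence_create(pt);
+ *     ...
+ *     mali_internal_sync_timeline_signal(tl);    // retires signalled points
+ *     mali_internal_sync_timeline_destroy(tl);
+ */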
+
+#endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)*/
+#endif /* _MALI_INTERNAL_SYNC_H */
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c
new file mode 100644
index 000000000000..d7893a3200cb
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c
@@ -0,0 +1,1134 @@
+/**
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_swap_alloc.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+
+static int is_first_resume = 1;
+/* Store the clock and voltage for boot/insmod and mali_resume */
+static struct mali_gpu_clk_item mali_gpu_clk[2];
+#endif
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters);
+#endif /* CONFIG_TRACEPOINTS */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include "mali_devfreq.h"
+#include "mali_osk_mali.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+#include <linux/pm_opp.h>
+#else
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif /* Linux >= 3.13*/
+#define dev_pm_opp_of_add_table of_init_opp_table
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define dev_pm_opp_of_remove_table of_free_opp_table
+#endif /* Linux >= 3.19 */
+#endif /* Linux >= 4.4.0 */
+#endif
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+extern unsigned int mali_mem_swap_out_threshold_value;
+module_param(mali_mem_swap_out_threshold_value, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_mem_swap_out_threshold_value, "Threshold value used to limit how much swappable memory is cached in the Mali driver.");
+
+#if defined(CONFIG_MALI_DVFS)
+/** The max fps, the same as the display vsync rate (default 60); can be set as a module parameter */
+extern int mali_max_system_fps;
+module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_system_fps, "Max system fps, the same as the display VSYNC rate.");
+
+/** A lower limit on the desired fps (default 58); can be set as a module parameter */
+extern int mali_desired_fps;
+module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_desired_fps, "Desired fps, a bit lower than mali_max_system_fps.");
+#endif
+
+#if MALI_ENABLE_CPU_CYCLES
+#include <linux/cpumask.h>
+#include <linux/timer.h>
+#include <asm/smp.h>
+static struct timer_list mali_init_cpu_clock_timers[8];
+static u32 mali_cpu_clock_last_value[8] = {0,};
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+struct pm_ext_ops mali_dev_ext_pm_ops = {
+ .base =
+ {
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ },
+};
+#else
+static const struct dev_pm_ops mali_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_driver_runtime_suspend,
+ .runtime_resume = mali_driver_runtime_resume,
+ .runtime_idle = mali_driver_runtime_idle,
+#endif
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ .poweroff = mali_driver_suspend_scheduler,
+};
+#endif
+
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+ {.compatible = "arm,mali-300"},
+ {.compatible = "arm,mali-400"},
+ {.compatible = "arm,mali-450"},
+ {.compatible = "arm,mali-470"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver = {
+ .probe = mali_probe,
+ .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_ext_pm_ops,
+#endif
+ .driver =
+ {
+ .name = MALI_GPU_NAME_UTGARD,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_pm_ops,
+#endif
+#ifdef CONFIG_MALI_DT
+ .of_match_table = of_match_ptr(base_dt_ids),
+#endif
+ },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .compat_ioctl = mali_ioctl,
+ .mmap = mali_mmap
+};
+
+#if MALI_ENABLE_CPU_CYCLES
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
+{
+ /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */
+ u32 write_value;
+
+ /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */
+ /* setting p15 c9 c12 1 to 0x8000000f==CPU_CYCLE_ENABLE |EVENT_3_ENABLE|EVENT_2_ENABLE|EVENT_1_ENABLE|EVENT_0_ENABLE */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f));
+
+
+ /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
+ write_value = 1 << 0; /* Bit 0 set. Enable counters */
+ if (reset) {
+ write_value |= 1 << 1; /* Reset event counters */
+ write_value |= 1 << 2; /* Reset cycle counter */
+ }
+ if (enable_divide_by_64) {
+ write_value |= 1 << 3; /* Enable the Clock divider by 64 */
+ }
+ write_value |= 1 << 4; /* Export enable. Not needed */
+ asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value));
+
+ /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
+ asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+
+
+ /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1 */
+ /* User mode access to the Performance Monitors enabled. */
+ /* Lets user space read CPU clock cycles */
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
+}
+
+/** A timer function that configures the cycle clock counter on the current CPU.
+ * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this
+ * function to trigger on all CPUs during module load.
+ */
+static void mali_init_cpu_clock_timer_func(unsigned long data)
+{
+ int reset_counters, enable_divide_clock_counter_by_64;
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+ unsigned int sample1;
+
+ MALI_IGNORE(data);
+
+ reset_counters = 1;
+ enable_divide_clock_counter_by_64 = 0;
+ mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
+
+ sample0 = mali_get_cpu_cyclecount();
+ sample1 = mali_get_cpu_cyclecount();
+
+ MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1));
+}
+
+/** A timer function for storing the current cycle counter value on all CPUs.
+ * Used for checking if the clocks have similar values or if they are drifting.
+ */
+static void mali_print_cpu_clock_timer_func(unsigned long data)
+{
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+
+ MALI_IGNORE(data);
+ sample0 = mali_get_cpu_cyclecount();
+ if (current_cpu < 8) {
+ mali_cpu_clock_last_value[current_cpu] = sample0;
+ }
+}
+
+/** Init the performance registers on all CPUs to count clock cycles.
+ * For init \a print_only should be 0.
+ * If \a print_only is 1, it will instead print the current clock value of all CPUs.
+ */
+void mali_init_cpu_time_counters_on_all_cpus(int print_only)
+{
+ int i = 0;
+ int cpu_number;
+ int jiffies_trigger;
+ int jiffies_wait;
+
+ jiffies_wait = 2;
+ jiffies_trigger = jiffies + jiffies_wait;
+
+ for (i = 0 ; i < 8 ; i++) {
+ init_timer(&mali_init_cpu_clock_timers[i]);
+ if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
+ else mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
+ mali_init_cpu_clock_timers[i].expires = jiffies_trigger;
+ }
+ cpu_number = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < 8 ; i++) {
+ int next_cpu;
+ add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
+ next_cpu = cpumask_next(cpu_number, cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids) break;
+ cpu_number = next_cpu;
+ }
+
+ while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait);
+
+ for (i = 0 ; i < 8 ; i++) {
+ del_timer_sync(&mali_init_cpu_clock_timers[i]);
+ }
+
+ if (print_only) {
+ if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) {
+ /* Diff can be printed if we want to check if the clocks are in sync
+ int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
+ } else {
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3]));
+ }
+ }
+}
+#endif
+
+int mali_module_init(void)
+{
+ int err = 0;
+
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+#if MALI_ENABLE_CPU_CYCLES
+ mali_init_cpu_time_counters_on_all_cpus(0);
+ MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n"));
+ /* Printing the current cpu counters */
+ mali_init_cpu_time_counters_on_all_cpus(1);
+#endif
+
+ /* Initialize module wide settings */
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+ err = mali_platform_device_register();
+ if (0 != err) {
+ return err;
+ }
+#endif
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+ err = platform_driver_register(&mali_platform_driver);
+
+ if (0 != err) {
+ MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ mali_platform_device_unregister();
+#endif
+#endif
+ mali_platform_device = NULL;
+ return err;
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (0 != err) {
+ /* Not fatal if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ /* Tracing the current frequency and voltage from boot/insmod */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Just call mali_get_current_gpu_clk_item(), to record current clock info. */
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[0].clock,
+ mali_gpu_clk[0].vol / 1000,
+ 0, 0, 0);
+#endif
+
+ MALI_PRINT(("Mali device driver loaded\n"));
+
+ return 0; /* Success */
+}
+
+void mali_module_exit(void)
+{
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
+
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+ platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+ mali_platform_device_unregister();
+#endif
+#endif
+
+ /* Tracing the current frequency and voltage from rmmod */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
+#endif
+
+ MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device *mali_device_alloc(void)
+{
+ return kzalloc(sizeof(struct mali_device), GFP_KERNEL);
+}
+
+void mali_device_free(struct mali_device *mdev)
+{
+ kfree(mdev);
+}
+#endif
+
+static int mali_probe(struct platform_device *pdev)
+{
+ int err;
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev;
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+ if (NULL != mali_platform_device) {
+ /* Already connected to a device, return error */
+ MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+ return -EEXIST;
+ }
+
+ mali_platform_device = pdev;
+
+#ifdef CONFIG_MALI_DT
+ /* If we use DT to initialize our DDK, we have to prepare some things first. */
+ err = mali_platform_device_init(mali_platform_device);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+ mali_platform_device = NULL;
+ return -EFAULT;
+ }
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+ mdev = mali_device_alloc();
+ if (!mdev) {
+ MALI_PRINT_ERROR(("Can't allocate mali device private data\n"));
+ return -ENOMEM;
+ }
+
+ mdev->dev = &pdev->dev;
+ dev_set_drvdata(mdev->dev, mdev);
+
+ /* Initialize clock and regulator */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ mdev->regulator = regulator_get_optional(mdev->dev, "mali");
+ if (IS_ERR_OR_NULL(mdev->regulator)) {
+ MALI_DEBUG_PRINT(2, ("Continuing without Mali regulator control\n"));
+ mdev->regulator = NULL;
+ /* Allow probe to continue without regulator */
+ }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_PM_OPP)
+ /* Register the OPPs if they are available in device tree */
+ if (dev_pm_opp_of_add_table(mdev->dev) < 0)
+ MALI_DEBUG_PRINT(3, ("OPP table not found\n"));
+#endif
+
+ /* Need to name the gpu clock "clk_mali" in the device tree */
+ mdev->clock = clk_get(mdev->dev, "clk_mali");
+ if (IS_ERR_OR_NULL(mdev->clock)) {
+ MALI_DEBUG_PRINT(2, ("Continuing without Mali clock control\n"));
+ mdev->clock = NULL;
+ /* Allow probe to continue without clock. */
+ } else {
+ err = clk_prepare_enable(mdev->clock);
+ if (err) {
+ MALI_PRINT_ERROR(("Failed to prepare and enable clock (%d)\n", err));
+ goto clock_prepare_failed;
+ }
+ }
+
+ /* Initialize PM metrics */
+ if (mali_pm_metrics_init(mdev) < 0) {
+ MALI_DEBUG_PRINT(2, ("mali pm metrics init failed\n"));
+ goto pm_metrics_init_failed;
+ }
+
+ if (mali_devfreq_init(mdev) < 0) {
+ MALI_DEBUG_PRINT(2, ("mali devfreq init failed\n"));
+ goto devfreq_init_failed;
+ }
+#endif
+
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+ /* Initialize the Mali GPU HW specified by pdev */
+ if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+ /* Register a misc device (so we are accessible from user space) */
+ err = mali_miscdevice_register(pdev);
+ if (0 == err) {
+ /* Setup sysfs entries */
+ err = mali_sysfs_register(mali_dev_name);
+
+ if (0 == err) {
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
+ return 0;
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+ }
+ mali_miscdevice_unregister();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+ }
+ mali_terminate_subsystems();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+ }
+ _mali_osk_wq_term();
+ }
+
+#ifdef CONFIG_MALI_DEVFREQ
+ mali_devfreq_term(mdev);
+devfreq_init_failed:
+ mali_pm_metrics_term(mdev);
+pm_metrics_init_failed:
+ clk_disable_unprepare(mdev->clock);
+clock_prepare_failed:
+ clk_put(mdev->clock);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_PM_OPP)
+ dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+ mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+ mali_platform_device_deinit(mali_platform_device);
+#endif
+ mali_platform_device = NULL;
+ return -EFAULT;
+}
+
+static int mali_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev = dev_get_drvdata(&pdev->dev);
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+ mali_sysfs_unregister();
+ mali_miscdevice_unregister();
+ mali_terminate_subsystems();
+ _mali_osk_wq_term();
+
+#ifdef CONFIG_MALI_DEVFREQ
+ mali_devfreq_term(mdev);
+
+ mali_pm_metrics_term(mdev);
+
+ if (mdev->clock) {
+ clk_disable_unprepare(mdev->clock);
+ clk_put(mdev->clock);
+ mdev->clock = NULL;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_PM_OPP)
+ dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+ mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+ mali_platform_device_deinit(mali_platform_device);
+#endif
+ mali_platform_device = NULL;
+ return 0;
+}
+
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+ int err;
+
+ mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+ mali_miscdevice.name = mali_dev_name;
+ mali_miscdevice.fops = &mali_fops;
+ mali_miscdevice.parent = get_device(&pdev->dev);
+
+ err = misc_register(&mali_miscdevice);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+ }
+
+ return err;
+}
+
+static void mali_miscdevice_unregister(void)
+{
+ misc_deregister(&mali_miscdevice);
+}
+
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev = dev_get_drvdata(dev);
+ if (!mdev)
+ return -ENODEV;
+#endif
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_suspend_device(mdev->devfreq);
+#endif
+
+ mali_pm_os_suspend(MALI_TRUE);
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+ return 0;
+}
+
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev = dev_get_drvdata(dev);
+ if (!mdev)
+ return -ENODEV;
+#endif
+
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Just call mali_get_current_gpu_clk_item() once, to record current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+ mali_pm_os_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_resume_device(mdev->devfreq);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev = dev_get_drvdata(dev);
+ if (!mdev)
+ return -ENODEV;
+#endif
+
+ if (MALI_TRUE == mali_pm_runtime_suspend()) {
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ MALI_DEBUG_PRINT(4, ("devfreq_suspend_device: stop devfreq monitor\n"));
+ devfreq_suspend_device(mdev->devfreq);
+#endif
+
+ return 0;
+ } else {
+ return -EBUSY;
+ }
+}
+
+static int mali_driver_runtime_resume(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ struct mali_device *mdev = dev_get_drvdata(dev);
+ if (!mdev)
+ return -ENODEV;
+#endif
+
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Just call mali_get_current_gpu_clk_item() once, to record current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+
+ mali_pm_runtime_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ MALI_DEBUG_PRINT(4, ("devfreq_resume_device: start devfreq monitor\n"));
+ devfreq_resume_device(mdev->devfreq);
+#endif
+ return 0;
+}
+
+static int mali_driver_runtime_idle(struct device *dev)
+{
+ /* Nothing to do */
+ return 0;
+}
+#endif
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data *session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ /* allocate a struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void *)session_data;
+
+ filp->f_mapping = mali_mem_swap_get_global_swap_file()->f_mapping;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode(_mali_osk_errcode_t err)
+{
+ switch (err) {
+ case _MALI_OSK_ERR_OK :
+ return 0;
+ case _MALI_OSK_ERR_FAULT:
+ return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC:
+ return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS:
+ return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM:
+ return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL:
+ return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data) {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (NULL == (void *)arg) {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION_V2:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+ err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_USER_SETTINGS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_REQUEST_HIGH_PRIORITY:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
+ err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
+ break;
+
+ case MALI_IOC_PENDING_SUBMIT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pending_submit_s), sizeof(u64)));
+ err = pending_submit_wrapper(session_data, (_mali_uk_pending_submit_s __user *)arg);
+ break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
+ err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STREAM_FD_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64)));
+ err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROILING_CONTROL_SET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64)));
+ err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg);
+ break;
+#else
+
+ case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
+ err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_ALLOC:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64)));
+ err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_free_mem_s), sizeof(u64)));
+ err = mem_free_wrapper(session_data, (_mali_uk_free_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_BIND:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_bind_mem_s), sizeof(u64)));
+ err = mem_bind_wrapper(session_data, (_mali_uk_bind_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNBIND:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unbind_mem_s), sizeof(u64)));
+ err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_COW:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64)));
+ err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_COW_MODIFY_RANGE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64)));
+ err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RESIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_resize_s), sizeof(u64)));
+ err = mem_resize_mem_wrapper(session_data, (_mali_uk_mem_resize_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_WRITE_SAFE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
+ err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
+ err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+#else
+ MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+ err = -ENOTTY;
+#endif
+ break;
+
+ case MALI_IOC_PP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_AND_GP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
+ err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_DISABLE_WB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
+ err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
+ err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_WAIT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
+ err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
+ err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_START:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
+ err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_SIGNAL:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
+ err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ };
+
+ return err;
+}
+
+
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
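For orientation, the user-space side of the misc device registered in this file might look roughly like the sketch below (illustrative only, not part of this patch). It assumes the MALI_IOC_GET_API_VERSION_V2 request and the _mali_uk_get_api_version_v2_s type are visible through an installed copy of mali_utgard_ioctl.h, and it leaves the argument structure zero-initialized instead of filling in real values.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mali/mali_utgard_ioctl.h> /* assumed install path of the uapi header */

int example_query_api_version(void)
{
	_mali_uk_get_api_version_v2_s args;
	int fd;
	int err;

	memset(&args, 0, sizeof(args));

	/* The node name comes from mali_dev_name via mali_miscdevice_register(). */
	fd = open("/dev/mali", O_RDWR);
	if (fd < 0)
		return -1;

	/* Dispatched by mali_ioctl() to get_api_version_v2_wrapper(). */
	err = ioctl(fd, MALI_IOC_GET_API_VERSION_V2, &args);

	close(fd);
	return err;
}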
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h
new file mode 100644
index 000000000000..be754cb15646
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/idr.h>
+#include <linux/rbtree.h>
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+#include <linux/version.h>
+
+extern struct platform_device *mali_platform_device;
+
+/* Kernels after 3.19.0 dropped the CONFIG_PM_RUNTIME define, so define it ourselves */
+#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define CONFIG_PM_RUNTIME 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
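As a small illustration of how other translation units might consume this header, a platform glue function could use the exported mali_platform_device handle together with the CONFIG_PM_RUNTIME shim, roughly as in the hypothetical sketch below (the helper name is invented for illustration and is not part of the driver):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mali_kernel_linux.h"

/* Hypothetical helper: enable runtime PM on the single Mali platform device. */
static void example_enable_mali_runtime_pm(void)
{
#ifdef CONFIG_PM_RUNTIME
	if (NULL != mali_platform_device)
		pm_runtime_enable(&mali_platform_device->dev);
#endif
}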
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c
new file mode 100644
index 000000000000..d34567263e1c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c
@@ -0,0 +1,1410 @@
+/**
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | (src))
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | ((sub_job) << 8) | (src))
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) ((a) & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF)
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum {
+ _MALI_DEVICE_SUSPEND,
+ _MALI_DEVICE_RESUME,
+ _MALI_DEVICE_DVFS_PAUSE,
+ _MALI_DEVICE_DVFS_RESUME,
+ _MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+static const char *const mali_power_events[_MALI_MAX_EVENTS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+ [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+ [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ r = snprintf(buffer, 64, "%u\n",
+ mali_executor_group_is_disabled(group) ? 0 : 1);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ unsigned long val;
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ r = kstrtoul(&buffer[0], 10, &val);
+ if (0 != r) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_executor_group_enable(group);
+ break;
+ case 0:
+ mali_executor_group_disable(group);
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations group_enabled_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = group_enabled_read,
+ .write = group_enabled_write,
+};
+
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_hw_core *hw_core;
+
+ hw_core = (struct mali_hw_core *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+ r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations hw_core_base_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = hw_core_base_addr_read,
+};
+
+static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+ char buf[64];
+ int r;
+ u32 val;
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Get counter for a particular sub job */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job);
+ } else {
+ val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job);
+ }
+ } else {
+ /* Get default counter for all PP sub jobs */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_global_src0();
+ } else {
+ val = mali_pp_job_get_pp_counter_global_src1();
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ val = mali_gp_job_get_gp_counter_src0();
+ } else {
+ val = mali_gp_job_get_gp_counter_src1();
+ }
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Set counter for a particular sub job */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val);
+ }
+ } else {
+ /* Set default counter for all PP sub jobs */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_global_src0((u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_global_src1((u32)val);
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ mali_gp_job_set_gp_counter_src0((u32)val);
+ } else {
+ mali_gp_job_set_gp_counter_src1((u32)val);
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_counter_src_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = profiling_counter_src_read,
+ .write = profiling_counter_src_write,
+};
+
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ if (0 == src_id) {
+ val = mali_l2_cache_core_get_counter_src0(l2_core);
+ } else {
+ val = mali_l2_cache_core_get_counter_src1(l2_core);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache) {
+ mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src0_read,
+ .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src1_read,
+ .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src1_write,
+};
+
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 src0 = 0;
+ u32 val0 = 0;
+ u32 src1 = 0;
+ u32 val1 = 0;
+ u32 val = -1;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+ if (0 == src_id) {
+ if (MALI_HW_CORE_NO_COUNTER != val0) {
+ val = val0;
+ }
+ } else {
+ if (MALI_HW_CORE_NO_COUNTER != val1) {
+ val = val1;
+ }
+ }
+
+ r = snprintf(buf, 64, "%u\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val1_read,
+};
+
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = kstrtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting (not exactly thread safe) */
+ if (1 == val && MALI_FALSE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_TRUE;
+ _mali_osk_pm_dev_ref_get_sync();
+ } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_FALSE;
+ _mali_osk_pm_dev_ref_put();
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ if (MALI_TRUE == power_always_on_enabled) {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+ } else {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+ }
+}
+
+static const struct file_operations power_always_on_fops = {
+ .owner = THIS_MODULE,
+ .read = power_always_on_read,
+ .write = power_always_on_write,
+};
+
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[32];
+ size_t copy_len;
+
+ /* Copy the event name from user space first; ubuf must not be dereferenced directly. */
+ copy_len = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, copy_len)) {
+ return -EFAULT;
+ }
+ buf[copy_len] = '\0';
+
+ if (!strncmp(buf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]) - 1)) {
+ mali_pm_os_suspend(MALI_TRUE);
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]) - 1)) {
+ mali_pm_os_resume();
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]) - 1)) {
+ mali_dev_pause();
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]) - 1)) {
+ mali_dev_resume();
+ }
+ *ppos += cnt;
+ return cnt;
+}
+
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+ file->f_pos = offset;
+ return 0;
+}
+
+static const struct file_operations power_power_events_fops = {
+ .owner = THIS_MODULE,
+ .write = power_power_events_write,
+ .llseek = power_power_events_seek,
+};
+
+#if MALI_STATE_TRACKING
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+ u32 len = 0;
+ u32 size;
+ char *buf;
+
+ size = seq_get_buf(seq_file, &buf);
+
+ if (!size) {
+ return -ENOMEM;
+ }
+
+ /* Create the internal state dump. */
+ len = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+ len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+ seq_commit(seq_file, len);
+
+ return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_seq_internal_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val != 0) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+ /* check if we are already recording */
+ if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+ MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+ return -EFAULT;
+ }
+
+ /* check if we need to clear out an old recording first */
+ if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+ MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+ return -EFAULT;
+ }
+ }
+
+ /* start recording profiling data */
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+ } else {
+ /* stop recording profiling data */
+ u32 count = 0;
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+ MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+ .owner = THIS_MODULE,
+ .read = profiling_record_read,
+ .write = profiling_record_write,
+};
+
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+ loff_t *spos;
+
+ /* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+ spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (NULL == spos) {
+ return NULL;
+ }
+
+ *spos = *pos;
+ return spos;
+}
+
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ loff_t *spos = v;
+
+ /* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+ /* check if the next entry is actually available */
+ if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+ return NULL;
+ }
+
+ *pos = ++*spos;
+ return spos;
+}
+
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+ kfree(v);
+}
+
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+ static u64 start_time = 0;
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+ if (0 == index) {
+ start_time = timestamp;
+ }
+
+ seq_printf(seq_file, "[%06u] ", index);
+
+ switch (event_id & 0x0F000000) {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ seq_printf(seq_file, "SINGLE | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ seq_printf(seq_file, "START | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ seq_printf(seq_file, "STOP | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+ seq_printf(seq_file, "SUSPEND | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_RESUME:
+ seq_printf(seq_file, "RESUME | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+ break;
+ }
+
+ switch (event_id & 0x00FF0000) {
+ case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+ seq_printf(seq_file, "SW | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GP0:
+ seq_printf(seq_file, "GP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP0:
+ seq_printf(seq_file, "PP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP1:
+ seq_printf(seq_file, "PP1 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP2:
+ seq_printf(seq_file, "PP2 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP3:
+ seq_printf(seq_file, "PP3 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP4:
+ seq_printf(seq_file, "PP4 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP5:
+ seq_printf(seq_file, "PP5 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP6:
+ seq_printf(seq_file, "PP6 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP7:
+ seq_printf(seq_file, "PP7 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GPU:
+ seq_printf(seq_file, "GPU | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+ break;
+ }
+
+ if (MALI_EVENT_ID_IS_HW(event_id)) {
+ if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+ switch (event_id & 0x0000FFFF) {
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+ seq_printf(seq_file, "PHYSICAL | ");
+ break;
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+ seq_printf(seq_file, "VIRTUAL | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ break;
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+
+ seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show
+};
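+
+/*
+ * For reference, the seq_file core drives the callbacks wired up above roughly
+ * as follows (simplified from fs/seq_file.c, error handling omitted):
+ *
+ *   p = op->start(m, &pos);
+ *   while (p) {
+ *           op->show(m, p);
+ *           p = op->next(m, p, &pos);
+ *   }
+ *   op->stop(m, p);
+ *
+ * which is why profiling_events_start() may return NULL when nothing has been
+ * recorded, and why profiling_events_stop() frees the cursor allocated there.
+ */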
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_human_readable_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif
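+
+/*
+ * Illustrative userspace sketch (not part of the driver): how the profiling
+ * files above are typically exercised. The paths assume debugfs is mounted at
+ * /sys/kernel/debug and that mali_sysfs_register() was called with "mali" as
+ * the directory name; both are assumptions that depend on the actual setup.
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <unistd.h>
+ *
+ *   static void mali_profiling_capture(void)
+ *   {
+ *           char line[256];
+ *           FILE *ev;
+ *           int fd = open("/sys/kernel/debug/mali/profiling/record", O_WRONLY);
+ *
+ *           if (fd < 0)
+ *                   return;
+ *           write(fd, "1", 1);   // start recording
+ *           sleep(1);            // let some GPU work run
+ *           write(fd, "0", 1);   // stop recording
+ *           close(fd);
+ *
+ *           ev = fopen("/sys/kernel/debug/mali/profiling/events_human_readable", "r");
+ *           if (!ev)
+ *                   return;
+ *           while (fgets(line, sizeof(line), ev))
+ *                   fputs(line, stdout);   // one decoded event per line
+ *           fclose(ev);
+ *   }
+ */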
+
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
+{
+#ifdef MALI_MEM_SWAP_TRACKING
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s %-10s \n"\
+ "=================================================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem", "swap_mem");
+#else
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s \n"\
+ "========================================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem");
+#endif
+ mali_session_memory_tracking(s);
+ return 0;
+}
+
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, memory_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .open = memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+
+static const struct file_operations utilization_gp_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_pp_read,
+};
+
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ _mali_uk_user_setting_t setting;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = kstrtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting */
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ mali_set_user_setting(setting, val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 value;
+ _mali_uk_user_setting_t setting;
+
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ value = mali_get_user_setting(setting);
+
+ r = snprintf(buf, 64, "%u\n", value);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = user_settings_read,
+ .write = user_settings_write,
+};
+
+static int mali_sysfs_user_settings_register(void)
+{
+ struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+ if (mali_user_settings_dir != NULL) {
+ long i;
+ for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
+ debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+ 0600, mali_user_settings_dir, (void *)i,
+ &user_settings_fops);
+ }
+ }
+
+ return 0;
+}
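+
+/*
+ * Note on the private-data pattern used above: open_copy_private_data() is not
+ * part of this excerpt, but for user_settings_read()/user_settings_write() to
+ * work it only needs to forward the per-file data that
+ * mali_sysfs_user_settings_register() passes to debugfs_create_file() (the
+ * setting index cast to a pointer), roughly:
+ *
+ *   static int open_copy_private_data(struct inode *inode, struct file *filp)
+ *   {
+ *           filp->private_data = inode->i_private;
+ *           return 0;
+ *   }
+ *
+ * The handlers then cast filp->private_data back to _mali_uk_user_setting_t,
+ * so no per-file lookup table is required.
+ */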
+
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = kstrtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+ if (ret) {
+ return ret;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_num_cores_enabled_write,
+ .read = pp_num_cores_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_total_fops = {
+ .owner = THIS_MODULE,
+ .read = pp_num_cores_total_read,
+};
+
+static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = kstrtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_executor_core_scaling_enable();
+ break;
+ case 0:
+ mali_executor_core_scaling_disable();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+}
+static const struct file_operations pp_core_scaling_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_core_scaling_enabled_write,
+ .read = pp_core_scaling_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r = 0;
+ char buffer[64];
+
+ switch (mali_kernel_core_get_product_id()) {
+ case _MALI_PRODUCT_ID_MALI200:
+ r = snprintf(buffer, 64, "Mali-200\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI300:
+ r = snprintf(buffer, 64, "Mali-300\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI400:
+ r = snprintf(buffer, 64, "Mali-400 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI450:
+ r = snprintf(buffer, 64, "Mali-450 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI470:
+ r = snprintf(buffer, 64, "Mali-470 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_UNKNOWN:
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations version_fops = {
+ .owner = THIS_MODULE,
+ .read = version_read,
+};
+
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+ struct mali_session_data *session, *tmp;
+ u32 session_seq = 1;
+
+ seq_printf(s, "timeline system info: \n=================\n\n");
+
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+ mali_timeline_debug_print_system(session->timeline_system, s);
+ seq_printf(s, "session %d end\n\n\n", session_seq++);
+ }
+ mali_session_unlock();
+
+ return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = timeline_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+#endif
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+ if (ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+ /* Debugfs not supported. */
+ mali_debugfs_dir = NULL;
+ } else {
+ if (NULL != mali_debugfs_dir) {
+ /* Debugfs directory created successfully; create files now */
+ struct dentry *mali_power_dir;
+ struct dentry *mali_gp_dir;
+ struct dentry *mali_pp_dir;
+ struct dentry *mali_l2_dir;
+ struct dentry *mali_profiling_dir;
+
+ debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+ mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+ if (mali_power_dir != NULL) {
+ debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
+ debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops);
+ }
+
+ mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+ if (mali_gp_dir != NULL) {
+ u32 num_groups;
+ long i;
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core) {
+ struct dentry *mali_gp_gpx_dir;
+ mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+ if (NULL != mali_gp_gpx_dir) {
+ debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+ debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+ }
+ break; /* no need to look for any other GP cores */
+ }
+
+ }
+ }
+
+ mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+ if (mali_pp_dir != NULL) {
+ u32 num_groups;
+ long i;
+
+ debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+ debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+ debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops);
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core) {
+ char buf[16];
+ struct dentry *mali_pp_ppx_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+ mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+ if (NULL != mali_pp_ppx_dir) {
+ debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+ if (!mali_group_is_virtual(group)) {
+ debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+ }
+ }
+ }
+ }
+ }
+
+ mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+ if (mali_l2_dir != NULL) {
+ struct dentry *mali_l2_all_dir;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+ if (mali_l2_all_dir != NULL) {
+ debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache) {
+ char buf[16];
+ struct dentry *mali_l2_l2x_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+ mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+ if (NULL != mali_l2_l2x_dir) {
+ debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+ debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+ debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
+ debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+ }
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+ }
+
+ debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+ debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+ debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+ debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+ mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+ if (mali_profiling_dir != NULL) {
+ u32 max_sub_jobs;
+ long i;
+ struct dentry *mali_profiling_gp_dir;
+ struct dentry *mali_profiling_pp_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ struct dentry *mali_profiling_proc_dir;
+#endif
+ /*
+ * Create directory where we can set GP HW counters.
+ */
+ mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
+ if (mali_profiling_gp_dir != NULL) {
+ debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+ }
+
+ /*
+ * Create directory where we can set PP HW counters.
+ * Possible override with specific HW counters for a particular sub job
+ * (Disable core scaling before using the override!)
+ */
+ mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
+ if (mali_profiling_pp_dir != NULL) {
+ debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+ }
+
+ max_sub_jobs = mali_executor_get_num_cores_total();
+ for (i = 0; i < max_sub_jobs; i++) {
+ char buf[16];
+ struct dentry *mali_profiling_pp_x_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
+ mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
+ if (NULL != mali_profiling_pp_x_dir) {
+ debugfs_create_file("counter_src0",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+ &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+ &profiling_counter_src_fops);
+ }
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+ if (mali_profiling_proc_dir != NULL) {
+ struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+ if (mali_profiling_proc_default_dir != NULL) {
+ debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+ }
+ }
+ debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+ debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+ debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+#endif
+ }
+
+#if MALI_STATE_TRACKING
+ debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+#if defined(DEBUG)
+ debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
+ if (mali_sysfs_user_settings_register()) {
+ /* Failed to create the debugfs entries for the user settings DB. */
+ MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+ }
+ }
+ }
+
+ /* Success! */
+ return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+ if (NULL != mali_debugfs_dir) {
+ debugfs_remove_recursive(mali_debugfs_dir);
+ }
+ return 0;
+}
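+
+/*
+ * Usage sketch (the real call sites are in the driver initialisation code, not
+ * in this excerpt): mali_sysfs_register() is expected to be called once when
+ * the device node is created and mali_sysfs_unregister() on teardown, e.g.:
+ *
+ *   if (mali_sysfs_register(mali_dev_name))
+ *           pr_warn("mali: debugfs registration failed\n");   // non-fatal
+ *   ...
+ *   mali_sysfs_unregister();
+ *
+ * Everything is created under <debugfs>/<mali_dev_name>/: version, gpu_memory,
+ * the utilization_* files, and the power/, gp/, pp/, l2/ and profiling/
+ * directories populated above.
+ */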
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+ return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h
new file mode 100644
index 000000000000..91580a87c1e1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_linux_trace.h b/drivers/gpu/arm/utgard/linux/mali_linux_trace.h
new file mode 100644
index 000000000000..e6c928dc7c9f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_linux_trace.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ * encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+ TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+ unsigned int d2, unsigned int d3, unsigned int d4),
+
+ TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned int, d0)
+ __field(unsigned int, d1)
+ __field(unsigned int, d2)
+ __field(unsigned int, d3)
+ __field(unsigned int, d4)
+ ),
+
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->d0 = d0;
+ __entry->d1 = d1;
+ __entry->d2 = d2;
+ __entry->d3 = d3;
+ __entry->d4 = d4;
+ ),
+
+ TP_printk("event=%d", __entry->event_id)
+ );
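+
+/*
+ * Illustrative use (the emitting code lives elsewhere in the driver):
+ * TRACE_EVENT() above generates a trace_mali_timeline_event() helper that the
+ * profiling code calls like any other tracepoint, e.g.:
+ *
+ *   trace_mali_timeline_event(event_id, d0, d1, d2, d3, d4);
+ *
+ * The event can then be enabled at runtime through the generic tracing
+ * interface, e.g. events/mali/mali_timeline_event/enable under the tracing
+ * directory in tracefs/debugfs.
+ */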
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+ TP_PROTO(unsigned int counter_id, unsigned int value),
+
+ TP_ARGS(counter_id, value),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, counter_id)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __entry->counter_id = counter_id;
+ __entry->value = value;
+ ),
+
+ TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+ );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+ TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
+
+ TP_ARGS(pid, tid, surface_id, counters),
+
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(pid_t, tid)
+ __field(void *, surface_id)
+ __field(unsigned int *, counters)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->tid = tid;
+ __entry->surface_id = surface_id;
+ __entry->counters = counters;
+ ),
+
+ TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+ );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id for which the core activity originates from
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+ TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+ TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(unsigned int, active)
+ __field(unsigned int, core_type)
+ __field(unsigned int, core_id)
+ __field(unsigned int, frame_builder_id)
+ __field(unsigned int, flush_id)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->active = active;
+ __entry->core_type = core_type;
+ __entry->core_id = core_id;
+ __entry->frame_builder_id = frame_builder_id;
+ __entry->flush_id = flush_id;
+ ),
+
+ TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+ );
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory.c b/drivers/gpu/arm/utgard/linux/mali_memory.c
new file mode 100644
index 000000000000..b7ef7391b652
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_executor.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_util.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_swap_alloc.h"
+#include "mali_memory_defer_bind.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_secure.h"
+#endif
+
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
+
+#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
+
+static void mali_mem_vma_open(struct vm_area_struct *vma)
+{
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ /* If the allocation needs to be shared, take an extra reference here */
+ mali_allocation_ref(alloc);
+ return;
+}
+static void mali_mem_vma_close(struct vm_area_struct *vma)
+{
+ /* If the allocation is shared, drop the reference here */
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+
+ mali_allocation_unref(&alloc);
+ vma->vm_private_data = NULL;
+}
+
+static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+ mali_mem_backend *mem_bkend = NULL;
+ int ret;
+ int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;
+
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ MALI_DEBUG_ASSERT(alloc->backend_handle);
+ MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);
+
+ if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
+ (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
+ (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
+ /* use the page fault to perform COW allocation on demand */
+ MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%x\n", address));
+ mutex_lock(&mem_bkend->mutex);
+ ret = mali_mem_cow_allocate_on_demand(mem_bkend,
+ (address - vma->vm_start) / PAGE_SIZE);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (ret != _MALI_OSK_ERR_OK) {
+ return VM_FAULT_OOM;
+ }
+ prefetch_num = 1;
+
+ /* Handle the CPU mapping of a COW-modified range: the mapping was
+ zapped in cow_modify_range(), so CPU access triggers this page
+ fault and the pages are mapped back for the CPU here. */
+ mutex_lock(&mem_bkend->mutex);
+ ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ return VM_FAULT_SIGBUS;
+ }
+ } else if ((mem_bkend->type == MALI_MEM_SWAP) ||
+ (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
+ int ret = _MALI_OSK_ERR_OK;
+
+ mutex_lock(&mem_bkend->mutex);
+ if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
+ ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+ } else {
+ ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (ret != _MALI_OSK_ERR_OK) {
+ MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%x\n", address));
+ return VM_FAULT_OOM;
+ } else {
+ return VM_FAULT_LOCKED;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Mali vma fault! It never happen, indicating some logic errors in caller.\n"));
+ /*NOT support yet or OOM*/
+ return VM_FAULT_OOM;
+ }
+ return VM_FAULT_NOPAGE;
+}
+
+static struct vm_operations_struct mali_kernel_vm_ops = {
+ .open = mali_mem_vma_open,
+ .close = mali_mem_vma_close,
+ .fault = mali_mem_vma_fault,
+};
+
+
+/** @brief Map a Mali allocation to a CPU address
+ *
+ * Supported backend types:
+ * - MALI_MEM_OS
+ * - need to add COW?
+ *
+ * Not supported backend types:
+ * - _MALI_MEMORY_BIND_BACKEND_UMP
+ * - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+ * - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mali_session_data *session;
+ mali_mem_allocation *mali_alloc = NULL;
+ u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ int ret = -EFAULT;
+
+ session = (struct mali_session_data *)filp->private_data;
+ if (NULL == session) {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
+ (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+ (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+ /* Operations used on any memory system */
+ /* nothing needs to be done in vm open/close for now */
+
+ /* find the mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (likely(mali_vma_node)) {
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+ if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
+ /* only allow to use start address for mmap */
+ MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
+ return -EFAULT;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(NULL == mali_vma_node);
+ return -EFAULT;
+ }
+
+ mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+ if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
+ MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n"));
+ return -EFAULT;
+ }
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ return -EFAULT;
+ }
+ mutex_unlock(&mali_idr_mutex);
+
+ if (!(MALI_MEM_SWAP == mali_alloc->type ||
+ (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+ /* Mark the memory as IO memory, meaning that no paging is to be
+ * performed and that it must not be included in crash dumps, and as
+ * reserved, meaning that it is always present and can never be paged
+ * out (see also the previous entry).
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ vma->vm_flags |= VM_RESERVED;
+#else
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+#endif
+ } else if (MALI_MEM_SWAP == mali_alloc->type) {
+ vma->vm_pgoff = mem_bkend->start_idx;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops;
+
+ mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+ /* If it's a copy-on-write mapping, map to read only */
+ if (!(vma->vm_flags & VM_WRITE)) {
+ MALI_DEBUG_PRINT(4, ("mmap allocation with read only !\n"));
+ /* add VM_WRITE because do_page_fault checks it on a write fault */
+ vma->vm_flags |= VM_WRITE | VM_READ;
+ vma->vm_page_prot = PAGE_READONLY;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
+ goto out;
+ }
+
+ if (mem_bkend->type == MALI_MEM_OS) {
+ ret = mali_mem_os_cpu_map(mem_bkend, vma);
+ } else if (mem_bkend->type == MALI_MEM_COW &&
+ (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ ret = mali_mem_cow_cpu_map(mem_bkend, vma);
+ } else if (mem_bkend->type == MALI_MEM_BLOCK) {
+ ret = mali_mem_block_cpu_map(mem_bkend, vma);
+ } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
+ (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+ /* For swappable memory, the CPU page table entries are created by the page fault handler. */
+ ret = 0;
+ } else if (mem_bkend->type == MALI_MEM_SECURE) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ ret = mali_mem_secure_cpu_map(mem_bkend, vma);
+#else
+ MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n"));
+ return -EFAULT;
+#endif
+ } else {
+ /* Not supported yet */
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory! \n"));
+ return -EFAULT;
+ }
+
+ if (ret != 0) {
+ MALI_DEBUG_PRINT(1, ("ret != 0\n"));
+ return -EFAULT;
+ }
+out:
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);
+
+ vma->vm_private_data = (void *)mali_alloc;
+ mali_alloc->cpu_mapping.vma = vma;
+
+ mali_allocation_ref(mali_alloc);
+
+ return 0;
+}
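+
+/*
+ * Userspace sketch (the allocation ioctl itself is part of the user/kernel
+ * interface and is not shown in this excerpt): the Mali virtual address of an
+ * allocation is passed back to the kernel as the mmap() offset, which is how
+ * mali_mmap() above recovers it from vma->vm_pgoff:
+ *
+ *   #include <sys/mman.h>
+ *
+ *   // mali_fd:   open descriptor for the Mali device node
+ *   // mali_addr: Mali (GPU) virtual address returned by the allocation ioctl
+ *   // size:      size of the allocation, page aligned
+ *   void *cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                        MAP_SHARED, mali_fd, (off_t)mali_addr);
+ *   if (cpu_ptr == MAP_FAILED)
+ *           return -1;   // mali_mmap() rejects offsets that do not match
+ *                        // the start address of an existing allocation
+ */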
+
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
+{
+ u32 size = descriptor->psize;
+ struct mali_session_data *session = descriptor->session;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+ /* Map dma-buf into this session's page tables */
+
+ if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ size += MALI_MMU_PAGE_SIZE;
+ }
+
+ return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
+}
+
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
+{
+ u32 old_size = descriptor->psize;
+ struct mali_session_data *session = descriptor->session;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+ if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ new_size += MALI_MMU_PAGE_SIZE;
+ }
+
+ if (new_size > old_size) {
+ MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
+ return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
+{
+ if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ size += MALI_MMU_PAGE_SIZE;
+ }
+
+ /* Unmap and flush the L2 cache */
+ mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
+ mali_executor_zap_all_active(session);
+}
+
+u32 _mali_ukk_report_memory_usage(void)
+{
+ u32 sum = 0;
+
+ if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+ sum += mali_mem_block_allocator_stat();
+ }
+
+ sum += mali_mem_os_stat();
+
+ return sum;
+}
+
+u32 _mali_ukk_report_total_memory_size(void)
+{
+ return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
+{
+ MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+ session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+
+ if (NULL == session_data->memory_lock) {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
+ if (NULL == session_data->cow_lock) {
+ _mali_osk_mutex_term(session_data->memory_lock);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_memory_manager_init(&session_data->allocation_mgr);
+
+ MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
+void mali_memory_session_end(struct mali_session_data *session)
+{
+ MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+ if (NULL == session) {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* free allocation */
+ mali_free_session_allocations(session);
+ /* do some checks during uninit */
+ mali_memory_manager_uninit(&session->allocation_mgr);
+
+ /* Free the lock */
+ _mali_osk_mutex_term(session->memory_lock);
+ _mali_osk_mutex_term(session->cow_lock);
+ return;
+}
+
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+ _mali_osk_errcode_t err;
+
+ idr_init(&mali_backend_idr);
+ mutex_init(&mali_idr_mutex);
+
+ err = mali_mem_swap_init();
+ if (err != _MALI_OSK_ERR_OK) {
+ return err;
+ }
+ err = mali_mem_os_init();
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mem_defer_bind_manager_init();
+ }
+
+ return err;
+}
+
+void mali_memory_terminate(void)
+{
+ mali_mem_swap_term();
+ mali_mem_defer_bind_manager_destory();
+ mali_mem_os_term();
+ if (mali_memory_have_dedicated_memory()) {
+ mali_mem_block_allocator_destroy();
+ }
+}
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
+{
+ mali_page_node *page_node = NULL;
+
+ page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
+ MALI_DEBUG_ASSERT(NULL != page_node);
+
+ if (page_node) {
+ page_node->type = type;
+ INIT_LIST_HEAD(&page_node->list);
+ }
+
+ return page_node;
+}
+
+void _mali_page_node_ref(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* add ref to this page */
+ get_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ mali_mem_block_add_ref(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ atomic_inc(&node->swap_it->ref_count);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
+}
+
+void _mali_page_node_unref(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* unref to this page */
+ put_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ mali_mem_block_dec_ref(node);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
+}
+
+
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
+ node->page = page;
+}
+
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
+ node->swap_it = item;
+}
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
+ node->blk_it = item;
+}
+
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* get ref count of this page */
+ return page_count(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return mali_mem_block_get_ref_count(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return atomic_read(&node->swap_it->ref_count);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
+ return -1;
+}
+
+
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return page_private(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return _mali_blk_item_get_phy_addr(node->blk_it);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return node->swap_it->dma_addr;
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
+ return 0;
+}
+
+
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return page_to_pfn(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ /* get phy addr for BLOCK page*/
+ return _mali_blk_item_get_pfn(node->blk_it);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return page_to_pfn(node->swap_it->page);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory.h b/drivers/gpu/arm/utgard/linux/mali_memory.h
new file mode 100644
index 000000000000..efebbef235d8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+
+_mali_osk_errcode_t mali_memory_initialize(void);
+void mali_memory_terminate(void);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
+{
+ return mali_mem_os_get_table_page(table_page, mapping);
+}
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
+{
+ mali_mem_os_release_table_page(phys, virt);
+}
+
+/** @brief mmap function
+ *
+ * mmap syscalls on the Mali device node will end up here.
+ *
+ * This function allocates Mali memory and maps it on CPU and Mali.
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/** @brief Start a new memory session
+ *
+ * Called when a process opens the Mali device node.
+ *
+ * @param session Pointer to session to initialize
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session);
+
+/** @brief Close a memory session
+ *
+ * Called when a process closes the Mali device node.
+ *
+ * Memory allocated by the session will be freed
+ *
+ * @param session Pointer to the session to terminate
+ */
+void mali_memory_session_end(struct mali_session_data *session);
+
+/** @brief Prepare Mali page tables for mapping
+ *
+ * This function will prepare the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor to the mapping
+ */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
+
+/** @brief Resize Mali page tables for mapping
+ *
+ * This function will Resize the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor to the mapping
+ * @param new_size The new size of descriptor
+ */
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size);
+
+/** @brief Free Mali page tables for mapping
+ *
+ * This function will unmap pages from Mali memory and free the page tables
+ * that are now unused.
+ *
+ * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary.
+ *
+ * @param descriptor Pointer to the memory descriptor to unmap
+ */
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags);
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type);
+
+void _mali_page_node_ref(struct mali_page_node *node);
+void _mali_page_node_unref(struct mali_page_node *node);
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page);
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item);
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item);
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node);
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node);
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node);
+
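+/*
+ * Lifecycle sketch (hypothetical call sites; the real ones live in the device
+ * file_operations, outside this excerpt): the entry points above are intended
+ * to be paired with the lifetime of the driver and of each session, roughly:
+ *
+ *   mali_memory_initialize();              // once, at driver load
+ *
+ *   mali_memory_session_begin(session);    // per process, on open()
+ *   ...                                    // mali_mmap() etc. while open
+ *   mali_memory_session_end(session);      // on release(); frees leftovers
+ *
+ *   mali_memory_terminate();               // once, at driver unload
+ */
+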
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c
new file mode 100644
index 000000000000..3bd01926df38
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+
+
+static mali_block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
+{
+ return (item->phy_addr & ~(MALI_BLOCK_REF_MASK));
+}
+
+
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
+{
+ return (item->phy_addr / MALI_BLOCK_SIZE);
+}
+
+
+u32 mali_mem_block_get_ref_count(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
+}
+
+
+/* Increase the reference count.
+ * Not atomic: the caller must hold sp_lock when calling this function.
+ */
+
+u32 mali_mem_block_add_ref(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
+ return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
+}
+
+/* Decrease the reference count.
+ * Not atomic: the caller must hold sp_lock when calling this function.
+ */
+u32 mali_mem_block_dec_ref(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
+ return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
+}
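+
+/*
+ * Illustration of the packing used above (MALI_BLOCK_SIZE and
+ * MALI_BLOCK_REF_MASK come from mali_memory_block_alloc.h): each
+ * mali_block_item keeps its block-aligned physical address and its reference
+ * count in one word, with the count stored in the low bits that the alignment
+ * leaves free. Assuming a 4 KiB block size and with sp_lock held:
+ *
+ *   mali_block_item item = { .phy_addr = 0x80001000 };   // block at 0x80001000, ref 0
+ *   struct mali_page_node *node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ *
+ *   _mali_page_node_add_block_item(node, &item);
+ *   mali_mem_block_add_ref(node);            // item.phy_addr -> 0x80001001
+ *   _mali_blk_item_get_phy_addr(&item);      // 0x80001000 (count masked off)
+ *   mali_mem_block_get_ref_count(node);      // 1
+ *   mali_mem_block_dec_ref(node);            // item.phy_addr -> 0x80001000
+ */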
+
+
+static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
+{
+ mali_block_allocator *info;
+ u32 usable_size;
+ u32 num_blocks;
+ mali_page_node *m_node;
+ mali_block_item *mali_blk_items = NULL;
+ int i = 0;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0) {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
+ if (NULL != info) {
+ INIT_LIST_HEAD(&info->free);
+ spin_lock_init(&info->sp_lock);
+ info->total_num = num_blocks;
+ mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
+
+ if (mali_blk_items) {
+ info->items = mali_blk_items;
+ /* add blocks (MALI_BLOCK_SIZE each) to the free list */
+ for (i = 0 ; i < num_blocks ; i++) {
+ /* add block information*/
+ mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
+ /* add to free list */
+ m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ if (m_node == NULL)
+ goto fail;
+ _mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
+ list_add_tail(&m_node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ }
+ return info;
+ }
+ }
+fail:
+ mali_mem_block_allocator_destroy();
+ return NULL;
+}
+
+void mali_mem_block_allocator_destroy(void)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
+
+ if (NULL == info)
+ return;
+
+ list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ list_del(&m_page->list);
+ kfree(m_page);
+ }
+
+ _mali_osk_free(info->items);
+ _mali_osk_free(info);
+}
+
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
+{
+ mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+ /* Unmap the memory from the mali virtual address space. */
+ mali_mem_block_mali_unmap(alloc);
+ mutex_lock(&mem_bkend->mutex);
+ free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
+ mutex_unlock(&mem_bkend->mutex);
+ return free_pages_nr;
+}
+
+
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
+ /*do some init */
+ INIT_LIST_HEAD(&block_mem->pfns);
+
+ spin_lock(&info->sp_lock);
+ /* check whether there is enough space */
+ if (atomic_read(&info->free_num) > page_count) {
+ list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+ if (page_count > 0) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0);
+ list_move(&m_page->list, &block_mem->pfns);
+ block_mem->count++;
+ atomic_dec(&info->free_num);
+ _mali_page_node_ref(m_page);
+ } else {
+ break;
+ }
+ page_count--;
+ }
+ } else {
+ /* can't allocate from BLOCK memory*/
+ spin_unlock(&info->sp_lock);
+ return -1;
+ }
+
+ spin_unlock(&info->sp_lock);
+ return 0;
+}
+
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
+{
+ u32 free_pages_nr = 0;
+
+ free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
+ MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+ block_mem->count = 0;
+ MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
+
+ return free_pages_nr;
+}
+
+
+u32 mali_mem_block_free_list(struct list_head *list)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ u32 free_pages_nr = 0;
+
+ if (info) {
+ spin_lock(&info->sp_lock);
+ list_for_each_entry_safe(m_page, m_tmp , list, list) {
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+ mali_mem_block_free_node(m_page);
+ }
+ spin_unlock(&info->sp_lock);
+ }
+ return free_pages_nr;
+}
+
+/* free the node */
+void mali_mem_block_free_node(struct mali_page_node *node)
+{
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+
+ /* only handle BLOCK node */
+ if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+ /*Need to make this atomic?*/
+ if (1 == _mali_page_node_get_ref_count(node)) {
+ /*Move to free list*/
+ _mali_page_node_unref(node);
+ list_move_tail(&node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ } else {
+ _mali_page_node_unref(node);
+ list_del(&node->list);
+ kfree(node);
+ }
+ }
+}
+
+/* unref the node, but not free it */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ mali_page_node *new_node;
+
+ /* only handle BLOCK node */
+ if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+ /*Need to make this atomic?*/
+ if (1 == _mali_page_node_get_ref_count(node)) {
+ /* allocate a new node, Add to free list, keep the old node*/
+ _mali_page_node_unref(node);
+ new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ if (new_node) {
+ memcpy(new_node, node, sizeof(mali_page_node));
+ list_add(&new_node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ } else
+ return _MALI_OSK_ERR_FAULT;
+
+ } else {
+ _mali_page_node_unref(node);
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
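+/**
+* Map every page of a BLOCK allocation into the session's Mali page
+* directory, starting at the Mali virtual address 'vaddr'.
+*/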
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ dma_addr_t phys;
+ u32 virt = vaddr;
+ u32 prop = props;
+
+ list_for_each_entry(m_page, &block_mem->pfns, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+
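+/**
+* Map a BLOCK backend into a CPU VMA by inserting the raw page frame numbers
+* one page at a time, starting at vma->vm_start.
+*/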
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ int ret;
+ mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
+ unsigned long addr = vma->vm_start;
+ struct mali_page_node *m_page;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+ list_for_each_entry(m_page, &block_mem->pfns, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ return -EFAULT;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+
+ }
+
+ return 0;
+}
+
+
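+/**
+* Register a dedicated, physically contiguous memory region: reserve it from
+* the OS and create the global block allocator on top of it.
+*/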
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+ mali_block_allocator *allocator;
+
+ /* Do the low level linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create generic block allocator object to handle it */
+ allocator = mali_mem_block_allocator_create(start, size);
+
+ if (NULL == allocator) {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(start, size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool mali_memory_have_dedicated_memory(void)
+{
+ return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE;
+}
+
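+/* Return the number of bytes of dedicated BLOCK memory currently in use. */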
+u32 mali_mem_block_allocator_stat(void)
+{
+ mali_block_allocator *allocator = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+
+ return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h
new file mode 100644
index 000000000000..70fd9ec25f50
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010, 2013, 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+#include <linux/spinlock.h>
+
+#include "mali_memory_types.h"
+
+#define MALI_BLOCK_SIZE (PAGE_SIZE) /* 4 kB, manage BLOCK memory as page size */
+#define MALI_BLOCK_REF_MASK (0xFFF)
+#define MALI_BLOCK_MAX_REF_COUNT (0xFFF)
+
+
+
+typedef struct mali_block_allocator {
+ /*
+ * In the free list each node's ref_count is 0;
+ * ref_count is increased when the node is allocated or referenced in COW
+ */
+ mali_block_item *items; /* information for each block item */
+ struct list_head free; /* free list of mali_memory_node */
+ spinlock_t sp_lock; /* lock for reference count & free list operations */
+ u32 total_num; /* total number of pages */
+ atomic_t free_num; /* number of free pages */
+} mali_block_allocator;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item);
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item);
+u32 mali_mem_block_get_ref_count(mali_page_node *node);
+u32 mali_mem_block_add_ref(mali_page_node *node);
+u32 mali_mem_block_dec_ref(mali_page_node *node);
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend);
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size);
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+mali_bool mali_memory_have_dedicated_memory(void);
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem);
+u32 mali_mem_block_free_list(struct list_head *list);
+void mali_mem_block_free_node(struct mali_page_node *node);
+void mali_mem_block_allocator_destroy(void);
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node);
+u32 mali_mem_block_allocator_stat(void);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_cow.c b/drivers/gpu/arm/utgard/linux/mali_memory_cow.c
new file mode 100644
index 000000000000..827458ff61ae
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_cow.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+/**
+* Allocate a single page for a COW backend and flush the cache.
+*/
+static struct page *mali_mem_cow_alloc_page(void)
+{
+ mali_mem_os_mem os_mem;
+ struct mali_page_node *node;
+ struct page *new_page;
+
+ int ret = 0;
+ /* allocate pages from os mem */
+ ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);
+
+ if (ret) {
+ return NULL;
+ }
+
+ MALI_DEBUG_ASSERT(1 == os_mem.count);
+
+ node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
+ new_page = node->page;
+ node->page = NULL;
+ list_del(&node->list);
+ kfree(node);
+
+ return new_page;
+}
+
+
+static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size)
+{
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
+ MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);
+
+ if (MALI_MEM_OS == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->os_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
+ return &target_bk->os_mem.pages;
+ } else if (MALI_MEM_COW == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->cow_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
+ return &target_bk->cow_mem.pages;
+ } else if (MALI_MEM_BLOCK == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->block_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
+ return &target_bk->block_mem.pfns;
+ } else if (MALI_MEM_SWAP == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->swap_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
+ return &target_bk->swap_mem.pages;
+ }
+
+ return NULL;
+}
+
+/**
+* Do COW for OS memory - also supports COW of memory that comes from block (bank) memory.
+* range_start/range_size may be zero, in which case mali_memory_cow_modify_range() is
+* expected to be called later.
+* This function allocates new pages from OS memory for the modified range of the COW
+* backend; pages outside the modified range are kept and only get an extra reference.
+*
+* @target_bk - target allocation's backend (the allocation to COW from)
+* @target_offset - offset in the target allocation to COW from (4K aligned, for COW of memory allocated from a memory bank)
+* @target_size - size of the target allocation to COW (for memory bank support)
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range
+*/
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp, *page_node;
+ int target_page = 0;
+ struct page *new_page;
+ struct list_head *pages = NULL;
+
+ pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+
+ if (NULL == pages) {
+ MALI_DEBUG_PRINT_ERROR(("No memory page need to cow ! \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(0 == cow->count);
+
+ INIT_LIST_HEAD(&cow->pages);
+ mutex_lock(&target_bk->mutex);
+ list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+ /* add page from (target_offset,target_offset+size) to cow backend */
+ if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+ /* allocate a new page node; always use OS memory for COW */
+ page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+
+ if (NULL == page_node) {
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&page_node->list);
+
+ /* check if in the modified range*/
+ if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ /* page is in the modified range: allocate a fresh page.
+ * To keep things simple, all COW pages come from OS memory. */
+ new_page = mali_mem_cow_alloc_page();
+
+ if (NULL == new_page) {
+ kfree(page_node);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ _mali_page_node_add_page(page_node, new_page);
+ } else {
+ /*Add Block memory case*/
+ if (m_page->type != MALI_PAGE_NODE_BLOCK) {
+ _mali_page_node_add_page(page_node, m_page->page);
+ } else {
+ page_node->type = MALI_PAGE_NODE_BLOCK;
+ _mali_page_node_add_block_item(page_node, m_page->blk_it);
+ }
+
+ /* add ref to this page */
+ _mali_page_node_ref(m_page);
+ }
+
+ /* add it to COW backend page list */
+ list_add_tail(&page_node->list, &cow->pages);
+ cow->count++;
+ }
+ target_page++;
+ }
+ mutex_unlock(&target_bk->mutex);
+ return _MALI_OSK_ERR_OK;
+error:
+ mali_mem_cow_release(backend, MALI_FALSE);
+ return _MALI_OSK_ERR_FAULT;
+}
+
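+/**
+* Swap-memory variant of mali_memory_cow_os_memory(): pages inside the
+* modified range get a newly allocated swap item and swap index, while pages
+* outside the range just take an extra reference on the source swap item.
+*/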
+_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp, *page_node;
+ int target_page = 0;
+ struct mali_swap_item *swap_item;
+ struct list_head *pages = NULL;
+
+ pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+ if (NULL == pages) {
+ MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(0 == cow->count);
+
+ INIT_LIST_HEAD(&cow->pages);
+ mutex_lock(&target_bk->mutex);
+
+ backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+ list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+ /* add page from (target_offset,target_offset+size) to cow backend */
+ if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+ /* allocate a new page node; swap memory is used for COW when the backend is swap-cowed */
+ page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+ if (NULL == page_node) {
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ /* check if in the modified range*/
+ if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ /* page is in the modified range: allocate a new swap item for it */
+ swap_item = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == swap_item) {
+ kfree(page_node);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ swap_item->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
+ kfree(page_node);
+ kfree(swap_item);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ _mali_page_node_add_swap_item(page_node, swap_item);
+ } else {
+ _mali_page_node_add_swap_item(page_node, m_page->swap_it);
+
+ /* add ref to this page */
+ _mali_page_node_ref(m_page);
+ }
+
+ list_add_tail(&page_node->list, &cow->pages);
+ cow->count++;
+ }
+ target_page++;
+ }
+ mutex_unlock(&target_bk->mutex);
+
+ return _MALI_OSK_ERR_OK;
+error:
+ mali_mem_swap_release(backend, MALI_FALSE);
+ return _MALI_OSK_ERR_FAULT;
+
+}
+
+
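+/* Drop one reference on a page node, dispatching on its type (OS, BLOCK or SWAP). */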
+_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return mali_mem_os_put_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return mali_mem_block_unref_node(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return _mali_mem_swap_put_page_node(node);
+ } else
+ MALI_DEBUG_ASSERT(0);
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/**
+* Modify a range of an existing COW backend
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range, in bytes
+*/
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_allocation *alloc = NULL;
+ struct mali_session_data *session;
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp;
+ LIST_HEAD(pages);
+ struct page *new_page;
+ u32 count = 0;
+ s32 change_pages_nr = 0;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+ if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ alloc = backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+ MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);
+
+ mutex_lock(&backend->mutex);
+
+ /* replace (and unref) the pages that fall inside the modified range */
+ list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {
+
+ /* check if in the modified range*/
+ if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ if (MALI_PAGE_NODE_SWAP != m_page->type) {
+ new_page = mali_mem_cow_alloc_page();
+
+ if (NULL == new_page) {
+ goto error;
+ }
+ if (1 != _mali_page_node_get_ref_count(m_page))
+ change_pages_nr++;
+ /* unref old page*/
+ _mali_osk_mutex_wait(session->cow_lock);
+ if (_mali_mem_put_page_node(m_page)) {
+ __free_page(new_page);
+ _mali_osk_mutex_signal(session->cow_lock);
+ goto error;
+ }
+ _mali_osk_mutex_signal(session->cow_lock);
+ /* add new page*/
+ /* always use OS for COW*/
+ m_page->type = MALI_PAGE_NODE_OS;
+ _mali_page_node_add_page(m_page, new_page);
+ } else {
+ struct mali_swap_item *swap_item;
+
+ swap_item = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == swap_item) {
+ goto error;
+ }
+
+ swap_item->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));
+ kfree(swap_item);
+ goto error;
+ }
+
+ if (1 != _mali_page_node_get_ref_count(m_page)) {
+ change_pages_nr++;
+ }
+
+ if (_mali_mem_put_page_node(m_page)) {
+ mali_mem_swap_free_swap_item(swap_item);
+ goto error;
+ }
+
+ _mali_page_node_add_swap_item(m_page, swap_item);
+ }
+ }
+ count++;
+ }
+ cow->change_pages_nr = change_pages_nr;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);
+
+ /* Zap the CPU mapping of the modified range, and redo the CPU mapping here if needed */
+ if (NULL != alloc->cpu_mapping.vma) {
+ MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
+ MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
+ MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);
+
+ if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+
+ ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);
+
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ /* used to trigger page fault for swappable cowed memory. */
+ alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;
+ alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;
+
+ zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+ /* clear these flags again so the swappable memory is unmapped with respect to struct page rather than the raw page frame */
+ alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;
+ alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;
+ }
+ }
+
+error:
+ mutex_unlock(&backend->mutex);
+ return ret;
+
+}
+
+
+/**
+* Allocate pages for a COW backend
+* @target_bk - target allocation's backend (the allocation to COW from)
+* @target_offset - offset in the target allocation to COW from (4K aligned, for COW of memory allocated from a memory bank)
+* @target_size - size of the target allocation to COW, in bytes (for memory bank support)
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range, in bytes
+*/
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ struct mali_session_data *session = backend->mali_allocation->session;
+
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size & offset must be a multiple of the system page size */
+ if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check backend type */
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+
+ switch (target_bk->type) {
+ case MALI_MEM_OS:
+ case MALI_MEM_BLOCK:
+ return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ break;
+ case MALI_MEM_COW:
+ if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+ return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ } else {
+ return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ }
+ break;
+ case MALI_MEM_SWAP:
+ return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ break;
+ case MALI_MEM_EXTERNAL:
+ /*NOT support yet*/
+ MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ break;
+ case MALI_MEM_DMA_BUF:
+ /*NOT support yet*/
+ MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ break;
+ case MALI_MEM_UMP:
+ /*NOT support yet*/
+ MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ break;
+ default:
+ /*Not support yet*/
+ MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! \n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ break;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Map COW backend memory into the Mali virtual address space
+* Supports OS and BLOCK mali_page_node types
+*/
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
+{
+ mali_mem_allocation *cow_alloc;
+ struct mali_page_node *m_page;
+ struct mali_session_data *session;
+ struct mali_page_directory *pagedir;
+ u32 virt, start;
+
+ cow_alloc = mem_bkend->mali_allocation;
+ virt = cow_alloc->mali_vma_node.vm_node.start;
+ start = virt;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT_POINTER(cow_alloc);
+
+ session = cow_alloc->session;
+ pagedir = session->page_directory;
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+ list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
+ if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
+ dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
+ MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+ return 0;
+}
+
+/**
+* Map COW backend to cpu
+* support OS/BLOCK memory
+*/
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ unsigned long addr = vma->vm_start;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+
+ list_for_each_entry(m_page, &cow->pages, list) {
+ /* We should use vm_insert_page, but it does a dcache
+ * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+ ret = vm_insert_page(vma, addr, page);
+ */
+ ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ return ret;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/**
+* Map some pages of a COW backend into the CPU vma at vaddr
+* @mem_bkend - COW backend
+* @vma - CPU vma to map into
+* @vaddr - start CPU virtual address mapped to
+* @num - maximum number of pages to map
+*/
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
+ struct vm_area_struct *vma,
+ unsigned long vaddr,
+ int num)
+{
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ int offset;
+ int count;
+ unsigned long vstart = vma->vm_start;
+ count = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+ MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+ offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if ((count >= offset) && (count < offset + num)) {
+ ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ if (count == offset) {
+ return _MALI_OSK_ERR_FAULT;
+ } else {
+ /* ret is EBUSY when the page isn't in the modified range, which is OK here */
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ vaddr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ count++;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+* Release COW backend memory
+* Pages are freed directly (put_page / unref); they are not returned to the page pool.
+*/
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
+ /* Unmap the memory from the mali virtual address space. */
+ if (MALI_TRUE == is_mali_mapped)
+ mali_mem_os_mali_unmap(alloc);
+ /* free cow backend list*/
+ _mali_osk_mutex_wait(session->cow_lock);
+ free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
+ _mali_osk_mutex_signal(session->cow_lock);
+
+ free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);
+
+ MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
+ } else {
+ free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
+ }
+
+
+ MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+ mem_bkend->cow_mem.count = 0;
+ return free_pages_nr;
+}
+
+
+/* The dst node can be either an OS node or a swap node. */
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
+{
+ void *dst, *src;
+ struct page *dst_page;
+ dma_addr_t dma_addr;
+
+ MALI_DEBUG_ASSERT(src_node != NULL);
+ MALI_DEBUG_ASSERT(dst_node != NULL);
+ MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
+ || dst_node->type == MALI_PAGE_NODE_SWAP);
+
+ if (dst_node->type == MALI_PAGE_NODE_OS) {
+ dst_page = dst_node->page;
+ } else {
+ dst_page = dst_node->swap_it->page;
+ }
+
+ dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ /* map it and copy the content */
+ dst = kmap_atomic(dst_page);
+
+ if (src_node->type == MALI_PAGE_NODE_OS ||
+ src_node->type == MALI_PAGE_NODE_SWAP) {
+ struct page *src_page;
+
+ if (src_node->type == MALI_PAGE_NODE_OS) {
+ src_page = src_node->page;
+ } else {
+ src_page = src_node->swap_it->page;
+ }
+
+ /* Clean and invalidate the cache.
+ * On ARM, speculative reads may pull stale data into the L1 cache via the
+ * kernel linear mapping. DMA_BIDIRECTIONAL invalidates the L1 cache so
+ * that the following read gets the latest data.
+ */
+ dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ src = kmap_atomic(src_page);
+ memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+ kunmap_atomic(src);
+ dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (src_node->type == MALI_PAGE_NODE_SWAP) {
+ src_node->swap_it->dma_addr = dma_addr;
+ }
+ } else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
+ /*
+ * use ioremap to map src for BLOCK memory
+ */
+ src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
+ memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+ iounmap(src);
+ }
+ kunmap_atomic(dst);
+ dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dst_node->type == MALI_PAGE_NODE_SWAP) {
+ dst_node->swap_it->dma_addr = dma_addr;
+ }
+}
+
+
+/*
+* Allocate a page on demand when the CPU accesses it.
+* This is used in the page fault handler.
+*/
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
+{
+ struct page *new_page = NULL;
+ struct mali_page_node *new_node = NULL;
+ int i = 0;
+ struct mali_page_node *m_page, *found_node = NULL;
+ struct mali_session_data *session = NULL;
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));
+
+ /* allocate new page here */
+ new_page = mali_mem_cow_alloc_page();
+ if (!new_page)
+ return _MALI_OSK_ERR_NOMEM;
+
+ new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+ if (!new_node) {
+ __free_page(new_page);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* find the page in backend*/
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset_page) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ MALI_DEBUG_ASSERT(found_node);
+ if (NULL == found_node) {
+ __free_page(new_page);
+ kfree(new_node);
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ _mali_page_node_add_page(new_node, new_page);
+
+ /* Copy the src page's content to new page */
+ _mali_mem_cow_copy_page(found_node, new_node);
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
+ session = mem_bkend->mali_allocation->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ if (1 != _mali_page_node_get_ref_count(found_node)) {
+ atomic_add(1, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ mem_bkend->cow_mem.change_pages_nr++;
+ }
+
+ _mali_osk_mutex_wait(session->cow_lock);
+ if (_mali_mem_put_page_node(found_node)) {
+ __free_page(new_page);
+ kfree(new_node);
+ _mali_osk_mutex_signal(session->cow_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ _mali_osk_mutex_signal(session->cow_lock);
+
+ list_replace(&found_node->list, &new_node->list);
+
+ kfree(found_node);
+
+ /* map to GPU side*/
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
+ _mali_osk_mutex_signal(session->memory_lock);
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_cow.h b/drivers/gpu/arm/utgard/linux/mali_memory_cow.h
new file mode 100644
index 000000000000..5f83a37fc8f8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_cow.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_COW_H__
+#define __MALI_MEMORY_COW_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include "mali_memory_types.h"
+
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
+ struct vm_area_struct *vma,
+ unsigned long vaddr,
+ int num);
+
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node);
+
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size);
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page);
+#endif
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c
new file mode 100644
index 000000000000..a9db577cb851
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_defer_bind.h"
+#include "mali_executor.h"
+#include "mali_osk.h"
+#include "mali_scheduler.h"
+#include "mali_gp_job.h"
+
+mali_defer_bind_manager *mali_dmem_man = NULL;
+
+static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)
+{
+ return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;
+}
+
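+/* Create the global defer-bind manager and reset its usage counters. */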
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)
+{
+ mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager));
+ if (!mali_dmem_man)
+ return _MALI_OSK_ERR_NOMEM;
+
+ atomic_set(&mali_dmem_man->num_used_pages, 0);
+ atomic_set(&mali_dmem_man->num_dmem, 0);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_mem_defer_bind_manager_destory(void)
+{
+ if (mali_dmem_man) {
+ MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem));
+ kfree(mali_dmem_man);
+ }
+ mali_dmem_man = NULL;
+}
+
+
+/* allocate pages from OS memory */
+_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock)
+{
+ int retval = 0;
+ u32 num_pages = require;
+ mali_mem_os_mem os_mem;
+
+ retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE);
+
+ /* add to free pages list */
+ if (0 == retval) {
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages));
+ list_splice(&os_mem.pages, &dblock->free_pages);
+ atomic_add(os_mem.count, &dblock->num_free_pages);
+ atomic_add(os_mem.count, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+ } else
+ return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock)
+{
+ u32 require_page;
+
+ if (!next_gp_job)
+ return _MALI_OSK_ERR_FAULT;
+
+ require_page = mali_dmem_get_gp_varying_size(next_gp_job);
+
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n",
+ require_page));
+ /* allocate more pages from OS */
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) {
+ MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/* do preparation for the allocation before defer bind */
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize)
+{
+ mali_mem_backend *mem_bkend = NULL;
+ struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));
+ if (NULL == bk_list)
+ return _MALI_OSK_ERR_FAULT;
+
+ INIT_LIST_HEAD(&bk_list->node);
+ /* Get backend memory */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ _mali_osk_free(bk_list);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mutex_unlock(&mali_idr_mutex);
+
+ /* If the mem backend has already been bound, no need to bind again.*/
+ if (mem_bkend->os_mem.count > 0) {
+ _mali_osk_free(bk_list);
+ return _MALI_OSK_ERR_OK;
+ }
+
+ MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));
+
+ INIT_LIST_HEAD(&mem_bkend->os_mem.pages);
+
+ bk_list->bkend = mem_bkend;
+ bk_list->vaddr = alloc->mali_vma_node.vm_node.start;
+ bk_list->session = alloc->session;
+ bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;
+ *required_varying_memsize += mem_bkend->size;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+
+ /* add to job to do list */
+ list_add(&bk_list->node, list);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+
+/* Bind physical memory to an allocation.
+ * This function is called from the IRQ handler. */
+static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node,
+ struct list_head *pages)
+{
+ struct mali_session_data *session = bk_node->session;
+ mali_mem_backend *mem_bkend = bk_node->bkend;
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session));
+
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+ list_splice(pages, &mem_bkend->os_mem.pages);
+ mem_bkend->os_mem.count = bk_node->page_num;
+
+ if (mem_bkend->type == MALI_MEM_OS) {
+ mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0,
+ mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT);
+ }
+ smp_wmb();
+ bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED;
+ mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED;
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED;
+ return _MALI_OSK_ERR_OK;
+}
+
+
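+/*
+* Move 'count' pages from the defer-bind block's free page pool onto 'pages'.
+* Returns 'pages' on success, or NULL if the pool holds fewer than 'count' pages.
+*/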
+static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock)
+{
+ int i = 0;
+ struct mali_page_node *m_page, *m_tmp;
+
+ if (atomic_read(&dblock->num_free_pages) < count) {
+ return NULL;
+ } else {
+ list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) {
+ if (i < count) {
+ list_move_tail(&m_page->list, pages);
+ } else {
+ break;
+ }
+ i++;
+ }
+ MALI_DEBUG_ASSERT(i == count);
+ atomic_sub(count, &dblock->num_free_pages);
+ return pages;
+ }
+}
+
+
+/* Called from the job start IOCTL to bind physical memory for each allocation
+ * on the GP job's defer-bind (vary_todo) list.
+ * @gp - GP job whose varying allocations need to be bound
+ * @dmem_block - block of pre-allocated free pages to bind from
+ */
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp,
+ struct mali_defer_mem_block *dmem_block)
+{
+ struct mali_defer_mem *dmem = NULL;
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ LIST_HEAD(pages);
+
+ if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) {
+ MALI_DEBUG_PRINT_ERROR(("#BIND: The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp));
+ dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));
+ if (dmem) {
+ INIT_LIST_HEAD(&dmem->node);
+ gp->dmem = dmem;
+ } else {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ atomic_add(1, &mali_dmem_man->num_dmem);
+ /* for each bk_list backend, do bind */
+ list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) {
+ INIT_LIST_HEAD(&pages);
+ if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) {
+ list_del(&bkn->node);
+ mali_mem_defer_bind_allocation(bkn, &pages);
+ _mali_osk_free(bkn);
+ } else {
+ /* running out of free pages here should not happen */
+ MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binded !!## \n"));
+ _mali_osk_free(gp->dmem);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ if (!list_empty(&gp->vary_todo)) {
+ MALI_DEBUG_PRINT_ERROR(("#BIND: The deferbind backend list isn't empty !!## \n"));
+ _mali_osk_free(gp->dmem);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp)
+{
+ if (gp->dmem) {
+ atomic_dec(&mali_dmem_man->num_dmem);
+ _mali_osk_free(gp->dmem);
+ }
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h
new file mode 100644
index 000000000000..defa08d52a46
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_MEMORY_DEFER_BIND_H_
+#define __MALI_MEMORY_DEFER_BIND_H_
+
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_gp_job;
+
+typedef struct mali_defer_mem {
+ struct list_head node; /*dlist node in bind manager */
+ u32 flag;
+} mali_defer_mem;
+
+
+typedef struct mali_defer_mem_block {
+ struct list_head free_pages; /* page pool */
+ atomic_t num_free_pages;
+} mali_defer_mem_block;
+
+/* entry in the list of varying-memory backends that still need to be bound */
+typedef struct mali_backend_bind_list {
+ struct list_head node;
+ struct mali_mem_backend *bkend;
+ u32 vaddr;
+ u32 page_num;
+ struct mali_session_data *session;
+ u32 flag;
+} mali_backend_bind_lists;
+
+
+typedef struct mali_defer_bind_manager {
+ atomic_t num_used_pages;
+ atomic_t num_dmem;
+} mali_defer_bind_manager;
+
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void);
+void mali_mem_defer_bind_manager_destory(void);
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, struct mali_defer_mem_block *dmem_block);
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize);
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock);
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c
new file mode 100644
index 000000000000..2e0d82299b36
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_virtual.h"
+#include "mali_pp_job.h"
+
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
+ */
+static int mali_dma_buf_map(mali_mem_backend *mem_backend)
+{
+ mali_mem_allocation *alloc;
+ struct mali_dma_buf_attachment *mem;
+ struct mali_session_data *session;
+ struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ struct scatterlist *sg;
+ u32 virt, flags;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ mem = mem_backend->dma_buf.attachment;
+ MALI_DEBUG_ASSERT_POINTER(mem);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(mem->session == session);
+
+ virt = alloc->mali_vma_node.vm_node.start;
+ flags = alloc->flags;
+
+ mali_session_memory_lock(session);
+ mem->map_ref++;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (1 == mem->map_ref) {
+
+ /* First reference taken, so we need to map the dma buf */
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(mem->sgt)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+ mem->map_ref--;
+ mali_session_memory_unlock(session);
+ return -EFAULT;
+ }
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_DEBUG_PRINT(1, ("Mapping of DMA memory failed\n"));
+ mem->map_ref--;
+ mali_session_memory_unlock(session);
+ return -ENOMEM;
+ }
+
+ pagedir = mali_session_get_page_directory(session);
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+ for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
+ u32 size = sg_dma_len(sg);
+ dma_addr_t phys = sg_dma_address(sg);
+
+ /* sg must be page aligned. */
+ MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+ virt += size;
+ }
+
+ if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ u32 guard_phys;
+ MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+ guard_phys = sg_dma_address(mem->sgt->sgl);
+ mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+
+ mem->is_mapped = MALI_TRUE;
+ mali_session_memory_unlock(session);
+ /* Wake up any thread waiting for buffer to become mapped */
+ wake_up_all(&mem->wait_queue);
+ } else {
+ MALI_DEBUG_ASSERT(mem->is_mapped);
+ mali_session_memory_unlock(session);
+ }
+
+ return 0;
+}
+
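+/*
+ * Drop one mapping reference on a dma-buf attachment; when the last reference
+ * goes away, unmap the attachment and free its Mali virtual address range.
+ */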
+static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem)
+{
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ MALI_DEBUG_ASSERT_POINTER(alloc->session);
+
+ mali_session_memory_lock(alloc->session);
+ mem->map_ref--;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (0 == mem->map_ref) {
+ dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+ if (MALI_TRUE == mem->is_mapped) {
+ mali_mem_mali_map_free(alloc->session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ }
+ mem->is_mapped = MALI_FALSE;
+ }
+ mali_session_memory_unlock(alloc->session);
+ /* Wake up any thread waiting for buffer to become unmapped */
+ wake_up_all(&mem->wait_queue);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+ struct mali_dma_buf_attachment *mem;
+ _mali_osk_errcode_t err;
+ int i;
+ int ret = 0;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+ if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get the backend memory for this cookie */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ mem = mem_bkend->dma_buf.attachment;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+
+ err = mali_dma_buf_map(mem_bkend);
+ if (0 != err) {
+ MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for mali address %x\n", mali_addr));
+ ret = -EFAULT;
+ continue;
+ }
+ }
+ return ret;
+}
+
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+ struct mali_dma_buf_attachment *mem;
+ int i;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+ if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get the backend memory for this cookie */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ mem = mem_bkend->dma_buf.attachment;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+ mali_dma_buf_unmap(mem_bkend->mali_allocation, mem);
+ }
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+ _mali_uk_dma_buf_get_size_s args;
+ int fd;
+ struct dma_buf *buf;
+
+ /* get call arguments from user space; copy_from_user returns the number of bytes that were NOT copied */
+ if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
+ return -EFAULT;
+ }
+
+ /* Do DMA-BUF stuff */
+ fd = args.mem_fd;
+
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+ return PTR_RET(buf);
+ }
+
+ if (0 != put_user(buf->size, &user_arg->size)) {
+ dma_buf_put(buf);
+ return -EFAULT;
+ }
+
+ dma_buf_put(buf);
+
+ return 0;
+}
+
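+/*
+ * Bind a dma-buf (given by \a fd) to a Mali allocation: take a reference on the
+ * buffer, attach it to the Mali device and record the attachment in the backend.
+ * With CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH the buffer is also mapped into the
+ * session's Mali virtual address space here.
+ */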
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ int fd, u32 flags)
+{
+ struct dma_buf *buf;
+ struct mali_dma_buf_attachment *dma_mem;
+ struct mali_session_data *session = alloc->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ /* get dma buffer */
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Currently, only mapping of the full buffer is supported. */
+ if (alloc->psize != buf->size) {
+ goto failed_alloc_mem;
+ }
+
+ dma_mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+ if (NULL == dma_mem) {
+ goto failed_alloc_mem;
+ }
+
+ dma_mem->buf = buf;
+ dma_mem->session = session;
+ dma_mem->map_ref = 0;
+ init_waitqueue_head(&dma_mem->wait_queue);
+
+ dma_mem->attachment = dma_buf_attach(dma_mem->buf, &mali_platform_device->dev);
+ if (NULL == dma_mem->attachment) {
+ goto failed_dma_attach;
+ }
+
+ mem_backend->dma_buf.attachment = dma_mem;
+
+ alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+ if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Map memory into session's Mali virtual address space. */
+ if (0 != mali_dma_buf_map(mem_backend)) {
+ goto Failed_dma_map;
+ }
+#endif
+
+ return _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+Failed_dma_map:
+ mali_dma_buf_unmap(alloc, dma_mem);
+#endif
+ /* Wait for buffer to become unmapped */
+ wait_event(dma_mem->wait_queue, !dma_mem->is_mapped);
+ MALI_DEBUG_ASSERT(!dma_mem->is_mapped);
+ dma_buf_detach(dma_mem->buf, dma_mem->attachment);
+failed_dma_attach:
+ _mali_osk_free(dma_mem);
+failed_alloc_mem:
+ dma_buf_put(buf);
+ return _MALI_OSK_ERR_FAULT;
+}
+
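+/*
+ * Undo mali_mem_bind_dma_buf(): wait until the buffer is no longer mapped,
+ * detach it from the Mali device, drop the dma-buf reference and free the
+ * attachment bookkeeping.
+ */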
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend)
+{
+ struct mali_dma_buf_attachment *mem;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type);
+
+ mem = mem_backend->dma_buf.attachment;
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation);
+ /* We mapped implicitly on attach, so we need to unmap on release */
+ mali_dma_buf_unmap(mem_backend->mali_allocation, mem);
+#endif
+ /* Wait for buffer to become unmapped */
+ wait_event(mem->wait_queue, !mem->is_mapped);
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ dma_buf_detach(mem->buf, mem->attachment);
+ dma_buf_put(mem->buf);
+
+ _mali_osk_free(mem);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h
new file mode 100644
index 000000000000..a9b2870389ff
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_DMA_BUF_H__
+#define __MALI_MEMORY_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+struct mali_pp_job;
+
+struct mali_dma_buf_attachment;
+struct mali_dma_buf_attachment {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct mali_session_data *session;
+ int map_ref;
+ struct mutex map_lock;
+ mali_bool is_mapped;
+ wait_queue_head_t wait_queue;
+};
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend);
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ int fd, u32 flags);
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_DMA_BUF_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_external.c b/drivers/gpu/arm/utgard/linux/mali_memory_external.c
new file mode 100644
index 000000000000..d9bd7eee2870
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_external.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_uk_types.h"
+
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend)
+{
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
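+/*
+* Bind a physically contiguous external buffer at phys_addr into the session's
+* Mali address space; optionally map one extra guard page past the end.
+*/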
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ u32 phys_addr,
+ u32 flag)
+{
+ struct mali_session_data *session;
+ _mali_osk_errcode_t err;
+ u32 virt, phys, size;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ size = alloc->psize;
+ session = (struct mali_session_data *)(uintptr_t)alloc->session;
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if (!size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if (size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+#if 0
+ /* Validate the mali physical range */
+ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(phys_addr, size)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ if (flag & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+ mali_session_memory_lock(session);
+
+ virt = alloc->mali_vma_node.vm_node.start;
+ phys = phys_addr;
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_session_memory_unlock(session);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+ if (alloc->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ phys_addr, (phys_addr + size - 1),
+ virt));
+ mali_session_memory_unlock(session);
+
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_external.h b/drivers/gpu/arm/utgard/linux/mali_memory_external.h
new file mode 100644
index 000000000000..2db178d96233
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_external.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_EXTERNAL_H__
+#define __MALI_MEMORY_EXTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ u32 phys_addr,
+ u32 flag);
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_manager.c b/drivers/gpu/arm/utgard/linux/mali_memory_manager.c
new file mode 100644
index 000000000000..27dee0f19c81
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_manager.c
@@ -0,0 +1,993 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include <linux/platform_device.h>
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_secure.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_util.h"
+#include "mali_memory_external.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_ukk.h"
+#include "mali_memory_swap_alloc.h"
+
+/*
+* New memory system interface
+*/
+
+/* init idr for backend memory */
+struct idr mali_backend_idr;
+struct mutex mali_idr_mutex;
+
+/* init allocation manager */
+int mali_memory_manager_init(struct mali_allocation_manager *mgr)
+{
+ /* init Locks */
+ rwlock_init(&mgr->vm_lock);
+ mutex_init(&mgr->list_mutex);
+
+ /* init link */
+ INIT_LIST_HEAD(&mgr->head);
+
+ /* init RB tree */
+ mgr->allocation_mgr_rb = RB_ROOT;
+ mgr->mali_allocation_num = 0;
+ return 0;
+}
+
+/* Deinit allocation manager.
+* Perform some consistency checks for debug builds.
+*/
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
+{
+ /* check RB tree is empty */
+ MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
+ /* check allocation List */
+ MALI_DEBUG_ASSERT(list_empty(&mgr->head));
+}
+
+/* Prepare memory descriptor */
+static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
+{
+ mali_mem_allocation *mali_allocation;
+
+ /* Allocate memory */
+ mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
+ if (NULL == mali_allocation) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
+ return NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+ /* do init */
+ mali_allocation->flags = 0;
+ mali_allocation->session = session;
+
+ INIT_LIST_HEAD(&mali_allocation->list);
+ _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
+
+ /* add to session list */
+ mutex_lock(&session->allocation_mgr.list_mutex);
+ list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
+ session->allocation_mgr.mali_allocation_num++;
+ mutex_unlock(&session->allocation_mgr.list_mutex);
+
+ return mali_allocation;
+}
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
+{
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(alloc->session);
+ mutex_lock(&alloc->session->allocation_mgr.list_mutex);
+ list_del(&alloc->list);
+ alloc->session->allocation_mgr.mali_allocation_num--;
+ mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
+
+ kfree(alloc);
+}
+
+int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
+{
+ mali_mem_backend *mem_backend = NULL;
+ s32 ret = -ENOSPC;
+ s32 index = -1;
+ *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
+ if (NULL == *backend) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
+ return -1;
+ }
+ mem_backend = *backend;
+ mem_backend->size = psize;
+ mutex_init(&mem_backend->mutex);
+ INIT_LIST_HEAD(&mem_backend->list);
+ mem_backend->using_count = 0;
+
+
+ /* link backend with id */
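+ /*
+ * On kernels older than 3.9 the two-step idr_pre_get()/idr_get_new_above()
+ * API is used; newer kernels use idr_alloc(). Either way the allocated id
+ * (>= 1) is returned as the backend handle.
+ */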
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+again:
+ if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
+ kfree(mem_backend);
+ return -ENOMEM;
+ }
+ mutex_lock(&mali_idr_mutex);
+ ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
+ mutex_unlock(&mali_idr_mutex);
+
+ if (-ENOSPC == ret) {
+ kfree(mem_backend);
+ return -ENOSPC;
+ }
+ if (-EAGAIN == ret)
+ goto again;
+#else
+ mutex_lock(&mali_idr_mutex);
+ ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
+ mutex_unlock(&mali_idr_mutex);
+ index = ret;
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
+ kfree(mem_backend);
+ return -ENOSPC;
+ }
+#endif
+ return index;
+}
+
+
+static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
+{
+ mali_mem_backend *mem_backend = *backend;
+
+ mutex_lock(&mali_idr_mutex);
+ idr_remove(&mali_backend_idr, backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ kfree(mem_backend);
+ *backend = NULL;
+}
+
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
+{
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
+ if (NULL == mali_vma_node) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
+ return NULL;
+ }
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+ return mem_bkend;
+}
+
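+/*
+* Resize an OS-memory backend to physical_size bytes. Growing allocates extra
+* pages and extends the CPU and Mali mappings; shrinking unmaps and frees the
+* tail pages. The session's allocated-page accounting is updated accordingly.
+*/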
+static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ int retval = 0;
+ mali_mem_allocation *mali_allocation = NULL;
+ mali_mem_os_mem tmp_os_mem;
+ s32 change_page_count;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+ MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE);
+
+ mali_allocation = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+ MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);
+
+ mutex_lock(&mem_backend->mutex);
+
+ /* Do resize*/
+ if (physical_size > mem_backend->size) {
+ u32 add_size = physical_size - mem_backend->size;
+
+ MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
+
+ /* Allocate new pages from os mem */
+ retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);
+
+ if (retval) {
+ if (-ENOMEM == retval) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ } else {
+ ret = _MALI_OSK_ERR_FAULT;
+ }
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
+ goto failed_alloc_memory;
+ }
+
+ MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);
+
+ /* Resize the memory of the backend */
+ ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
+ goto failed_resize_pages;
+ }
+
+ /*Resize cpu mapping */
+ if (NULL != mali_allocation->cpu_mapping.vma) {
+ ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
+ goto failed_cpu_map;
+ }
+ }
+
+ /* Resize mali mapping */
+ _mali_osk_mutex_wait(session->memory_lock);
+ ret = mali_mem_mali_map_resize(mali_allocation, physical_size);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
+ goto failed_gpu_map;
+ }
+
+ ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
+ mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+ if (ret) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
+ goto failed_gpu_map;
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+ } else {
+ u32 dec_size, page_count;
+ u32 vaddr = 0;
+ INIT_LIST_HEAD(&tmp_os_mem.pages);
+ tmp_os_mem.count = 0;
+
+ dec_size = mem_backend->size - physical_size;
+ MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);
+
+ page_count = dec_size / MALI_MMU_PAGE_SIZE;
+ vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;
+
+ /* Resize the memory of the backend */
+ ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
+ goto failed_resize_pages;
+ }
+
+ /* Resize mali map */
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ /* Zap cpu mapping */
+ if (0 != mali_allocation->cpu_mapping.addr) {
+ MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
+ zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
+ }
+
+ /* Free those extra pages */
+ mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+ }
+
+ /* Resize memory allocation and memory backend */
+ change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
+ mali_allocation->psize = physical_size;
+ mem_backend->size = physical_size;
+ mutex_unlock(&mem_backend->mutex);
+
+ if (change_page_count > 0) {
+ atomic_add(change_page_count, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+
+ } else {
+ atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
+ }
+
+ return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+ _mali_osk_mutex_signal(session->memory_lock);
+failed_cpu_map:
+ if (physical_size > mem_backend->size) {
+ mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
+ (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
+ } else {
+ mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+ }
+failed_resize_pages:
+ if (0 != tmp_os_mem.count)
+ mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+failed_alloc_memory:
+
+ mutex_unlock(&mem_backend->mutex);
+ return ret;
+}
+
+
+/* Set GPU MMU properties */
+static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
+{
+ if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
+ *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+ } else {
+ *properties = MALI_MMU_FLAGS_DEFAULT;
+ }
+}
+
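+/*
+* Grow the resizable allocation that backs mali_addr by add_size bytes. Fails
+* if the new physical size would exceed the allocation's reserved virtual
+* range.
+*/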
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
+{
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_allocation *mali_allocation = NULL;
+ u32 new_physical_size;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
+
+ /* Get the memory backend that needs to be resized. */
+ mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+ if (NULL == mem_backend) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+ return ret;
+ }
+
+ mali_allocation = mem_backend->mali_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+ new_physical_size = add_size + mem_backend->size;
+
+ if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
+ return ret;
+
+ MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);
+
+ ret = mali_mem_resize(session, mem_backend, new_physical_size);
+
+ return ret;
+}
+
+/**
+* _mali_ukk_mem_allocate() - allocate mali memory
+*/
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ int retval = 0;
+ mali_mem_allocation *mali_allocation = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
+
+ /* Check whether the requested mali address is already in use */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
+
+ if (unlikely(mali_vma_node)) {
+ MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ /* create mali memory allocation */
+
+ mali_allocation = mali_mem_allocation_struct_create(session);
+
+ if (mali_allocation == NULL) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->psize;
+ mali_allocation->vsize = args->vsize;
+
+ /* Select the backend type: MALI_MEM_SWAP for swappable allocations,
+ * MALI_MEM_OS with the resize flag when resizing must be supported,
+ * MALI_MEM_SECURE for secure allocations, MALI_MEM_BLOCK when dedicated
+ * memory is available, otherwise MALI_MEM_OS.
+ */
+ if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
+ mali_allocation->type = MALI_MEM_SWAP;
+ } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
+ mali_allocation->type = MALI_MEM_OS;
+ mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
+ } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) {
+ mali_allocation->type = MALI_MEM_SECURE;
+ } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+ mali_allocation->type = MALI_MEM_BLOCK;
+ } else {
+ mali_allocation->type = MALI_MEM_OS;
+ }
+
+ /* add allocation node to RB tree for index */
+ mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = args->vsize;
+
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
+ if (mali_allocation->backend_handle < 0) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+ goto failed_alloc_backend;
+ }
+
+
+ mem_backend->mali_allocation = mali_allocation;
+ mem_backend->type = mali_allocation->type;
+
+ mali_allocation->mali_mapping.addr = args->gpu_vaddr;
+
+ /* set gpu mmu property */
+ _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+ /* do prepare for MALI mapping */
+ if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+ _mali_osk_mutex_wait(session->memory_lock);
+
+ ret = mali_mem_mali_map_prepare(mali_allocation);
+ if (0 != ret) {
+ _mali_osk_mutex_signal(session->memory_lock);
+ goto failed_prepare_map;
+ }
+ _mali_osk_mutex_signal(session->memory_lock);
+ }
+
+ if (mali_allocation->psize == 0) {
+ mem_backend->os_mem.count = 0;
+ INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+ goto done;
+ }
+
+ if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
+ mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
+ mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
+ /* init for defer bind backend*/
+ mem_backend->os_mem.count = 0;
+ INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+
+ goto done;
+ }
+
+ if (likely(mali_allocation->psize > 0)) {
+
+ if (MALI_MEM_SECURE == mem_backend->type) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n"));
+ goto failed_alloc_pages;
+ }
+#else
+ ret = _MALI_OSK_ERR_UNSUPPORTED;
+ MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n"));
+ goto failed_alloc_pages;
+#endif
+ } else {
+
+ /* allocate physical memory */
+ if (mem_backend->type == MALI_MEM_OS) {
+ retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+ } else if (mem_backend->type == MALI_MEM_BLOCK) {
+ /* try to allocate from BLOCK memory first, then fall back to OS memory if that fails. */
+ if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
+ retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+ mem_backend->type = MALI_MEM_OS;
+ mali_allocation->type = MALI_MEM_OS;
+ }
+ } else if (MALI_MEM_SWAP == mem_backend->type) {
+ retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
+ } else {
+ /* no other backend types are supported here */
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ if (retval) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
+ goto failed_alloc_pages;
+ }
+ }
+ }
+
+ /* map to GPU side */
+ if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+ _mali_osk_mutex_wait(session->memory_lock);
+ /* Map on Mali */
+
+ if (mem_backend->type == MALI_MEM_OS) {
+ ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
+ mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+
+ } else if (mem_backend->type == MALI_MEM_BLOCK) {
+ mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
+ mali_allocation->mali_mapping.properties);
+ } else if (mem_backend->type == MALI_MEM_SWAP) {
+ ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
+ mali_allocation->mali_mapping.properties);
+ } else if (mem_backend->type == MALI_MEM_SECURE) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties);
+#endif
+ } else { /* unsupported type */
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+ }
+done:
+ if (MALI_MEM_OS == mem_backend->type) {
+ atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
+ } else if (MALI_MEM_BLOCK == mem_backend->type) {
+ atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
+ } else if (MALI_MEM_SECURE == mem_backend->type) {
+ atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages);
+ } else {
+ MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
+ atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
+ atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
+ }
+
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+
+failed_alloc_pages:
+ mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
+failed_prepare_map:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ return ret;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ u32 vaddr = args->gpu_vaddr;
+ mali_mem_allocation *mali_alloc = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ /* find the mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
+ if (NULL == mali_vma_node) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+
+ if (mali_alloc)
+ /* check ref_count */
+ args->free_pages_nr = mali_allocation_unref(&mali_alloc);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Function _mali_ukk_mem_bind -- bind external memory to a new GPU address
+* It allocates a new mem allocation and binds the external memory to it.
+* Supported backend types are:
+* _MALI_MEMORY_BIND_BACKEND_UMP
+* _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+* _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+* CPU access is not supported yet.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_allocation *mali_allocation = NULL;
+ MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));
+
+ /* allocate mali allocation */
+ mali_allocation = mali_mem_allocation_struct_create(session);
+
+ if (mali_allocation == NULL) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->size;
+ mali_allocation->vsize = args->size;
+ mali_allocation->mali_mapping.addr = args->vaddr;
+
+ /* add allocation node to RB tree for index */
+ mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = args->size;
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ /* allocate backend*/
+ if (mali_allocation->psize > 0) {
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+ if (mali_allocation->backend_handle < 0) {
+ goto Failed_alloc_backend;
+ }
+
+ } else {
+ goto Failed_alloc_backend;
+ }
+
+ mem_backend->size = mali_allocation->psize;
+ mem_backend->mali_allocation = mali_allocation;
+
+ switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
+ case _MALI_MEMORY_BIND_BACKEND_UMP:
+#if defined(CONFIG_MALI400_UMP)
+ mali_allocation->type = MALI_MEM_UMP;
+ mem_backend->type = MALI_MEM_UMP;
+ ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
+ args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
+ goto Failed_bind_backend;
+ }
+#else
+ MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+ goto Failed_bind_backend;
+#endif
+ break;
+ case _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ mali_allocation->type = MALI_MEM_DMA_BUF;
+ mem_backend->type = MALI_MEM_DMA_BUF;
+ ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
+ args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
+ goto Failed_bind_backend;
+ }
+#else
+ MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+ goto Failed_bind_backend;
+#endif
+ break;
+ case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
+ /* not allowed */
+ MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
+ goto Failed_bind_backend;
+ break;
+
+ case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
+ mali_allocation->type = MALI_MEM_EXTERNAL;
+ mem_backend->type = MALI_MEM_EXTERNAL;
+ ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
+ args->mem_union.bind_ext_memory.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
+ goto Failed_bind_backend;
+ }
+ break;
+
+ case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
+ /* not allowed */
+ MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n"));
+ goto Failed_bind_backend;
+ break;
+
+ default:
+ MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n"));
+ goto Failed_bind_backend;
+ break;
+ }
+ MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
+ atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
+ return _MALI_OSK_ERR_OK;
+
+Failed_bind_backend:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+
+Failed_alloc_backend:
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
+ return ret;
+}
+
+
+/*
+* Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address
+* This function unbinds the backend memory and frees the allocation;
+* there is no ref_count for this type of memory.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
+{
+ /**/
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_allocation *mali_allocation = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+ u32 mali_addr = args->vaddr;
+ MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
+
+ /* find the allocation by vaddr */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (likely(mali_vma_node)) {
+ MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+ mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ } else {
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ if (NULL != mali_allocation)
+ /* check ref_count */
+ mali_allocation_unref(&mali_allocation);
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+* Function _mali_ukk_mem_cow -- COW for an allocation
+* This function allocates new pages for a range [range_start, range_start+size)
+* of the allocation and maps them to a GPU vaddr; pages outside the range are
+* reused from the target allocation.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_backend *target_backend = NULL;
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_allocation = NULL;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ /* Get the target backend for cow */
+ target_backend = mali_mem_backend_struct_search(session, args->target_handle);
+
+ if (NULL == target_backend || 0 == target_backend->size) {
+ MALI_DEBUG_ASSERT_POINTER(target_backend);
+ MALI_DEBUG_ASSERT(0 != target_backend->size);
+ return ret;
+ }
+
+ /* COW does not support resizable memory */
+ MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));
+
+ /* Check if the new mali address is allocated */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
+
+ if (unlikely(mali_vma_node)) {
+ MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
+ return ret;
+ }
+
+ /* create a new allocation for COW */
+ mali_allocation = mali_mem_allocation_struct_create(session);
+ if (mali_allocation == NULL) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->target_size;
+ mali_allocation->vsize = args->target_size;
+ mali_allocation->type = MALI_MEM_COW;
+
+ /* add allocation node to RB tree for index */
+ mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ /* create new backend for COW memory */
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+ if (mali_allocation->backend_handle < 0) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+ goto failed_alloc_backend;
+ }
+ mem_backend->mali_allocation = mali_allocation;
+ mem_backend->type = mali_allocation->type;
+
+ if (target_backend->type == MALI_MEM_SWAP ||
+ (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
+ mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
+ /**
+ * CoWed swap backends cannot be mapped as non-linear VMAs: when a VMA is
+ * flagged VM_NONLINEAR the kernel takes over vma->vm_private_data, which
+ * the mali driver already uses to store the mali_allocation pointer, so
+ * the two would conflict.
+ * To work around this, about 64MB of index space starting at 0 is
+ * reserved; no real page index is ever set in the range
+ * 0..(64MB >> PAGE_SHIFT_NUM). Every CoWed swap backend gets start_idx 0,
+ * is mapped as linear, and is added to the priority tree of the global
+ * swap file. Such VMAs are never found through a normal page->index
+ * lookup, so their pages also cannot be swapped out.
+ */
+ mem_backend->start_idx = 0;
+ }
+
+ /* Increase the target backend's COW count and allocate new pages from OS
+ * memory for the modified range of the COW backend; pages outside the
+ * modified range are kept from the target and only gain a reference.
+ */
+ MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
+ mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
+
+ ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
+ goto failed_do_cow;
+ }
+
+ /* map to GPU side */
+ mali_allocation->mali_mapping.addr = args->vaddr;
+ /* set gpu mmu property */
+ _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+
+ _mali_osk_mutex_wait(session->memory_lock);
+ /* Map on Mali */
+ ret = mali_mem_mali_map_prepare(mali_allocation);
+ if (0 != ret) {
+ MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
+ goto failed_gpu_map;
+ }
+
+ if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ mutex_lock(&target_backend->mutex);
+ target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
+ mutex_unlock(&target_backend->mutex);
+
+ atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+ _mali_osk_mutex_signal(session->memory_lock);
+ mali_mem_cow_release(mem_backend, MALI_FALSE);
+ mem_backend->cow_mem.count = 0;
+failed_do_cow:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
+ /* Get the backend that needs to be modified. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+ if (NULL == mem_backend || 0 == mem_backend->size) {
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(0 != mem_backend->size);
+ return ret;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
+
+ ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
+ args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+ if (_MALI_OSK_ERR_OK != ret)
+ return ret;
+ _mali_osk_mutex_wait(session->memory_lock);
+ if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
+ }
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
+{
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+ MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE);
+
+ /* Get the memory backend that needs to be resized. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+ if (NULL == mem_backend) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+ return ret;
+ }
+
+ MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
+
+ ret = mali_mem_resize(session, mem_backend, args->psize);
+
+ return ret;
+}
+
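+/*
+* Report overall GPU memory usage; when a vaddr is supplied and it refers to a
+* COW backend, also return the number of pages changed by COW.
+*/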
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
+{
+ args->memory_usage = _mali_ukk_report_memory_usage();
+ if (0 != args->vaddr) {
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ /* Get the backend that needs to be modified. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+ if (NULL == mem_backend) {
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MEM_COW == mem_backend->type)
+ args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+ }
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_manager.h b/drivers/gpu/arm/utgard/linux/mali_memory_manager.h
new file mode 100644
index 000000000000..23d8cde753a1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_manager.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_MANAGER_H__
+#define __MALI_MEMORY_MANAGER_H__
+
+#include "mali_osk.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_allocation_manager {
+ rwlock_t vm_lock;
+ struct rb_root allocation_mgr_rb;
+ struct list_head head;
+ struct mutex list_mutex;
+ u32 mali_allocation_num;
+};
+
+extern struct idr mali_backend_idr;
+extern struct mutex mali_idr_mutex;
+
+int mali_memory_manager_init(struct mali_allocation_manager *mgr);
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr);
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc);
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size);
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address);
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args);
+
+#endif
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c
new file mode 100644
index 000000000000..91d670362eb9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+static unsigned long dma_attrs_wc = 0;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+#endif
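+/*
+* Note: the shrinker callback signature changed several times upstream; the
+* prototypes above cover the pre-3.0, 3.0-3.11 and 3.12+ (count/scan split)
+* variants.
+*/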
+static void mali_mem_os_trim_pool(struct work_struct *work);
+
+struct mali_mem_os_allocator mali_mem_os_allocator = {
+ .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+ .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+ .pool_count = 0,
+
+ .allocated_pages = ATOMIC_INIT(0),
+ .allocation_limit = 0,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ .shrinker.shrink = mali_mem_os_shrink,
+#else
+ .shrinker.count_objects = mali_mem_os_shrink_count,
+ .shrinker.scan_objects = mali_mem_os_shrink,
+#endif
+ .shrinker.seeks = DEFAULT_SEEKS,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+ .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
+{
+ LIST_HEAD(pages);
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
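+ /*
+ * With cow_flag set, only OS pages whose reference count has dropped to one
+ * are returned to the pool and counted as freed; shared pages just lose one
+ * reference. Without cow_flag the whole list goes back to the pool.
+ */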
+ if (MALI_TRUE == cow_flag) {
+ list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
+ /* only handle OS nodes here */
+ if (m_page->type == MALI_PAGE_NODE_OS) {
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ list_move(&m_page->list, &pages);
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+ free_pages_nr ++;
+ } else {
+ _mali_page_node_unref(m_page);
+ m_page->page = NULL;
+ list_del(&m_page->list);
+ kfree(m_page);
+ }
+ }
+ }
+ } else {
+ list_cut_position(&pages, os_pages, os_pages->prev);
+ atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
+ free_pages_nr = pages_count;
+ }
+
+ /* Put pages on pool. */
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ list_splice(&pages, &mali_mem_os_allocator.pool_pages);
+ mali_mem_os_allocator.pool_count += free_pages_nr;
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+ queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+ }
+ return free_pages_nr;
+}
+
+/**
+* Put a page back without returning it to the page pool.
+*/
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
+{
+ MALI_DEBUG_ASSERT_POINTER(page);
+ if (1 == page_count(page)) {
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+ dma_unmap_page(&mali_platform_device->dev, page_private(page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ ClearPagePrivate(page);
+ }
+ put_page(page);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 i = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_from);
+ MALI_DEBUG_ASSERT_POINTER(mem_to);
+
+ if (mem_from->count < start_page + page_count) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) {
+ if (i >= start_page && i < start_page + page_count) {
+ list_move_tail(&m_page->list, &mem_to->pages);
+ mem_from->count--;
+ mem_to->count++;
+ }
+ i++;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
+{
+ struct page *new_page;
+ LIST_HEAD(pages_list);
+ size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+ size_t remaining = page_count;
+ struct mali_page_node *m_page, *m_tmp;
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+ if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+ MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+ size,
+ atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+ mali_mem_os_allocator.allocation_limit));
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&os_mem->pages);
+ os_mem->count = page_count;
+
+ /* Grab pages from pool. */
+ {
+ size_t pool_pages;
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
+ for (i = pool_pages; i > 0; i--) {
+ BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
+ list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
+ }
+ mali_mem_os_allocator.pool_count -= pool_pages;
+ remaining -= pool_pages;
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+ }
+
+ /* Process pages from pool. */
+ i = 0;
+ list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
+ BUG_ON(NULL == m_page);
+
+ list_move_tail(&m_page->list, &os_mem->pages);
+ }
+
+ /* Allocate new pages, if needed. */
+ for (i = 0; i < remaining; i++) {
+ dma_addr_t dma_addr;
+ gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
+ int err;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+ flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+ flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+ flags |= GFP_DMA;
+#else
+ /* arm64 Utgard only works with memory below 4G, but the kernel
+ * provides no way here to allocate memory below 4G
+ */
+ MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+ new_page = alloc_page(flags);
+
+ if (unlikely(NULL == new_page)) {
+ /* Calculate the number of pages actually allocated, and free them. */
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -ENOMEM;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&mali_platform_device->dev, dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
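+ /*
+ * The map/unmap/map sequence above is intended to force CPU cache
+ * maintenance for the new page while leaving a live DMA mapping whose
+ * address is stored in page_private() below.
+ */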
+
+ err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+ if (unlikely(err)) {
+ MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+ new_page, err));
+ __free_page(new_page);
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -EFAULT;
+ }
+
+ /* Store page phys addr */
+ SetPagePrivate(new_page);
+ set_page_private(new_page, dma_addr);
+
+ m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+ if (unlikely(NULL == m_page)) {
+ MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n"));
+ dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ ClearPagePrivate(new_page);
+ __free_page(new_page);
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -EFAULT;
+ }
+ m_page->page = new_page;
+
+ list_add_tail(&m_page->list, &os_mem->pages);
+ }
+
+ atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+ cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+ }
+
+ return 0;
+}
+
+
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ u32 virt;
+ u32 prop = props;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+ MALI_DEBUG_ASSERT(start_page <= os_mem->count);
+ MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count);
+
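+ /*
+ * If the range ends at the last page, walk the page list backwards from the
+ * tail and map only mapping_pgae_num pages; otherwise walk forwards and skip
+ * the first start_page entries.
+ */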
+ if ((start_page + mapping_pgae_num) == os_mem->count) {
+
+ virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num);
+
+ list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+ virt -= MALI_MMU_PAGE_SIZE;
+ if (mapping_pgae_num > 0) {
+ dma_addr_t phys = page_private(m_page->page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ } else {
+ break;
+ }
+ mapping_pgae_num--;
+ }
+
+ } else {
+ u32 i = 0;
+ virt = vaddr;
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+
+ if (i >= start_page) {
+ dma_addr_t phys = page_private(m_page->page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ }
+ i++;
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+ struct mali_page_node *m_page;
+ struct page *page;
+ int ret;
+ unsigned long addr = vma->vm_start;
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+ /* We should use vm_insert_page, but it does a dcache
+ * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+ ret = vm_insert_page(vma, addr, page);
+ */
+ page = m_page->page;
+ ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
+
+ if (unlikely(0 != ret)) {
+ return -EFAULT;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size)
+{
+ mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ int offset;
+ int mapping_page_num;
+ int count;
+
+ unsigned long vstart = vma->vm_start;
+ count = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+ MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+ offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+ MALI_DEBUG_ASSERT(offset <= os_mem->count);
+ mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE;
+ MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count);
+
+ if ((offset + mapping_page_num) == os_mem->count) {
+
+ unsigned long vm_end = start_vaddr + mappig_size;
+
+ list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+ vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
+ if (mapping_page_num > 0) {
+ ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+
+ if (unlikely(0 != ret)) {
+ /* vm_insert_pfn() returns -EBUSY if the page has already been mapped into the page table; that is OK */
+ if (-EBUSY == ret) {
+ break;
+ } else {
+ MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n",
+ ret, offset + mapping_page_num, os_mem->count));
+ }
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ break;
+ }
+ mapping_page_num--;
+
+ }
+ } else {
+
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+ if (count >= offset) {
+
+ ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+
+ if (unlikely(0 != ret)) {
+ /* vm_insert_pfn() returns -EBUSY if the page has already been mapped into the page table; that is OK */
+ if (-EBUSY == ret) {
+ break;
+ } else {
+ MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n",
+ ret, count, offset, os_mem->count));
+ }
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ count++;
+ vstart += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
+{
+
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ /* Unmap the memory from the mali virtual address space. */
+ mali_mem_os_mali_unmap(alloc);
+ mutex_lock(&mem_bkend->mutex);
+ /* Free pages */
+ if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
+ /* Take the lock to avoid a race when freeing page nodes shared by COW memory. */
+ _mali_osk_mutex_wait(session->cow_lock);
+ free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
+ _mali_osk_mutex_signal(session->cow_lock);
+ } else {
+ free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+ mem_bkend->os_mem.count = 0;
+ return free_pages_nr;
+}
+
+
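+/* Small pool of pre-allocated MMU page-table pages. mali_mem_os_get_table_page() hands
+ * entries out and falls back to a DMA allocation when the pool is empty, while
+ * mali_mem_os_release_table_page() refills it; the pool is protected by its own spinlock. */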
+#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
+static struct {
+ struct {
+ mali_dma_addr phys;
+ mali_io_address mapping;
+ } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
+ size_t count;
+ spinlock_t lock;
+} mali_mem_page_table_page_pool = {
+ .count = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+};
+
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+ dma_addr_t tmp_phys;
+
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+ if (0 < mali_mem_page_table_page_pool.count) {
+ u32 i = --mali_mem_page_table_page_pool.count;
+ *phys = mali_mem_page_table_page_pool.page[i].phys;
+ *mapping = mali_mem_page_table_page_pool.page[i].mapping;
+
+ ret = _MALI_OSK_ERR_OK;
+ }
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+ if (_MALI_OSK_ERR_OK != ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+ GFP_KERNEL, dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+ GFP_KERNEL, &dma_attrs_wc);
+#else
+ *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
+ if (NULL != *mapping) {
+ ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+ *phys = (mali_dma_addr)tmp_phys;
+ }
+ }
+
+ return ret;
+}
+
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
+{
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+ if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
+ u32 i = mali_mem_page_table_page_pool.count;
+ mali_mem_page_table_page_pool.page[i].phys = phys;
+ mali_mem_page_table_page_pool.page[i].mapping = virt;
+
+ ++mali_mem_page_table_page_pool.count;
+
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+ } else {
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+ dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+ &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
+ }
+}
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page)
+{
+ struct page *page = m_page->page;
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);
+
+ if (1 == page_count(page)) {
+ dma_unmap_page(&mali_platform_device->dev, page_private(page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ ClearPagePrivate(page);
+ }
+ __free_page(page);
+ m_page->page = NULL;
+ list_del(&m_page->list);
+ kfree(m_page);
+}
+
+/* The maximum number of page table pool pages to free in one go. */
+#define MALI_MEM_OS_CHUNK_TO_FREE 64UL
+
+/* Free a certain number of pages from the page table page pool.
+ * The pool lock must be held when calling the function, and the lock will be
+ * released before returning.
+ */
+static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
+{
+ mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+ void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+ u32 i;
+
+ MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
+
+ /* Remove nr_to_free pages from the pool and store them locally on stack. */
+ for (i = 0; i < nr_to_free; i++) {
+ u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
+
+ phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
+ virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
+ }
+
+ mali_mem_page_table_page_pool.count -= nr_to_free;
+
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+ /* After releasing the spinlock: free the pages we removed from the pool. */
+ for (i = 0; i < nr_to_free; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i], dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
+ }
+}
+
+static void mali_mem_os_trim_page_table_page_pool(void)
+{
+ size_t nr_to_free = 0;
+ size_t nr_to_keep;
+
+ /* Keep 2 page table pages for each 1024 pages in the page cache. */
+ nr_to_keep = mali_mem_os_allocator.pool_count / 512;
+ /* And a minimum of eight pages, to accommodate new sessions. */
+ nr_to_keep += 8;
+
+ if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
+
+ if (nr_to_keep < mali_mem_page_table_page_pool.count) {
+ nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
+ nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
+ }
+
+ /* Pool lock will be released by the callee. */
+ mali_mem_os_page_table_pool_free(nr_to_free);
+}
+
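+/* Shrinker "count" callback: report how many pages the OS page pool currently holds. */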
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ return mali_mem_os_allocator.pool_count;
+}
+
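+/* Shrinker "scan" callback. The shrinker prototype has changed across kernel versions,
+ * hence the version-dependent declarations below. */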
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+#endif /* Linux < 2.6.35 */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif /* Linux < 3.12.0 */
+#endif /* Linux < 3.0.0 */
+{
+ struct mali_page_node *m_page, *m_tmp;
+ unsigned long flags;
+ struct list_head *le, pages;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+ int nr = nr_to_scan;
+#else
+ int nr = sc->nr_to_scan;
+#endif
+
+ if (0 == nr) {
+ return mali_mem_os_shrink_count(shrinker, sc);
+ }
+
+ if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
+ /* Not able to lock. */
+ return -1;
+ }
+
+ if (0 == mali_mem_os_allocator.pool_count) {
+ /* No pages available */
+ spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+ return 0;
+ }
+
+ /* Release from general page pool */
+ nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
+ mali_mem_os_allocator.pool_count -= nr;
+ list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+ --nr;
+ if (0 == nr) break;
+ }
+ list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+ spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+
+ list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+ mali_mem_os_free_page_node(m_page);
+ }
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+ /* Pool is below the static limit; stop the trim timer. */
+ MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+ cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ return mali_mem_os_shrink_count(shrinker, sc);
+#else
+ return nr;
+#endif
+}
+
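+/* Delayed-work handler that trims the OS page pool back towards
+ * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES, trims the page-table page pool as well,
+ * and re-arms itself while the pool is still above the limit. */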
+static void mali_mem_os_trim_pool(struct work_struct *data)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ struct list_head *le;
+ LIST_HEAD(pages);
+ size_t nr_to_free;
+
+ MALI_IGNORE(data);
+
+ MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
+
+ /* Release from general page pool */
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+ const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
+
+ /* Free half of the pages above the static limit, but at least 64 pages (256KB). */
+ nr_to_free = max(count / 2, min_to_free);
+
+ mali_mem_os_allocator.pool_count -= nr_to_free;
+ list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+ --nr_to_free;
+ if (0 == nr_to_free) break;
+ }
+ list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+ }
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+ mali_mem_os_free_page_node(m_page);
+ }
+
+ /* Release some pages from page table page pool */
+ mali_mem_os_trim_page_table_page_pool();
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+ queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+ }
+}
+
+_mali_osk_errcode_t mali_mem_os_init(void)
+{
+ mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
+ if (NULL == mali_mem_os_allocator.wq) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ dma_attrs_wc = DMA_ATTR_WRITE_COMBINE;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
+ register_shrinker(&mali_mem_os_allocator.shrinker);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_os_term(void)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ unregister_shrinker(&mali_mem_os_allocator.shrinker);
+ cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
+
+ if (NULL != mali_mem_os_allocator.wq) {
+ destroy_workqueue(mali_mem_os_allocator.wq);
+ mali_mem_os_allocator.wq = NULL;
+ }
+
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
+ mali_mem_os_free_page_node(m_page);
+
+ --mali_mem_os_allocator.pool_count;
+ }
+ BUG_ON(mali_mem_os_allocator.pool_count);
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ /* Release from page table page pool */
+ do {
+ u32 nr_to_free;
+
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+
+ nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
+
+ /* Pool lock will be released by the callee. */
+ mali_mem_os_page_table_pool_free(nr_to_free);
+ } while (0 != mali_mem_page_table_page_pool.count);
+}
+
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+ mali_mem_os_allocator.allocation_limit = size;
+
+ MALI_SUCCESS;
+}
+
+u32 mali_mem_os_stat(void)
+{
+ return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h
new file mode 100644
index 000000000000..8c9b35d0b230
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_OS_ALLOC_H__
+#define __MALI_MEMORY_OS_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_memory_types.h"
+
+
+/** @brief Release Mali OS memory
+ *
+ * The session memory_lock must be held when calling this function.
+ *
+ * @param mem_bkend Pointer to the mali_mem_backend to release
+ */
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend);
+
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
+
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
+
+_mali_osk_errcode_t mali_mem_os_init(void);
+
+void mali_mem_os_term(void);
+
+u32 mali_mem_os_stat(void);
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page);
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size);
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag);
+
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page);
+
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count);
+
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_page_num, u32 props);
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mapping_size);
+
+#endif /* __MALI_MEMORY_OS_ALLOC_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_secure.c b/drivers/gpu/arm/utgard/linux/mali_memory_secure.c
new file mode 100644
index 000000000000..2836b1b76a03
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_secure.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_secure.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+
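+/* Attach a secure buffer that was exported as a dma-buf: look the buffer up from the fd,
+ * attach it to the Mali device and map its scatter-gather table for DMA. */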
+_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd)
+{
+ struct dma_buf *buf;
+ MALI_DEBUG_ASSERT_POINTER(secure_mem);
+
+ /* get dma buffer */
+ buf = dma_buf_get(mem_fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf!\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (size != buf->size) {
+ MALI_DEBUG_PRINT_ERROR(("The secure mem size not match to the dma buf size!\n"));
+ goto failed_alloc_mem;
+ }
+
+ secure_mem->buf = buf;
+ secure_mem->attachment = dma_buf_attach(secure_mem->buf, &mali_platform_device->dev);
+ if (NULL == secure_mem->attachment) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf attachment!\n"));
+ goto failed_dma_attach;
+ }
+
+ secure_mem->sgt = dma_buf_map_attachment(secure_mem->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(secure_mem->sgt)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to map dma buf attachment\n"));
+ goto failed_dma_map;
+ }
+
+ secure_mem->count = size / MALI_MMU_PAGE_SIZE;
+
+ return _MALI_OSK_ERR_OK;
+
+failed_dma_map:
+ dma_buf_detach(secure_mem->buf, secure_mem->attachment);
+failed_dma_attach:
+failed_alloc_mem:
+ dma_buf_put(buf);
+ return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+ struct mali_page_directory *pagedir;
+ struct scatterlist *sg;
+ u32 virt = vaddr;
+ u32 prop = props;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(secure_mem);
+ MALI_DEBUG_ASSERT_POINTER(secure_mem->sgt);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ pagedir = session->page_directory;
+
+ for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
+ u32 size = sg_dma_len(sg);
+ dma_addr_t phys = sg_dma_address(sg);
+
+ /* sg must be page aligned. */
+ MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, size, prop);
+
+ MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x gpu virtual address: 0x%x! \n", phys, virt));
+ virt += size;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+
+int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+
+ int ret = 0;
+ struct scatterlist *sg;
+ mali_mem_secure *secure_mem = &mem_bkend->secure_mem;
+ unsigned long addr = vma->vm_start;
+ int i;
+
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);
+
+ for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
+ phys_addr_t phys;
+ dma_addr_t dev_addr;
+ u32 size, j;
+ dev_addr = sg_dma_address(sg);
+#if defined(CONFIG_ARM64) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ phys = dma_to_phys(&mali_platform_device->dev, dev_addr);
+#else
+ phys = page_to_phys(pfn_to_page(dma_to_pfn(&mali_platform_device->dev, dev_addr)));
+#endif
+ size = sg_dma_len(sg);
+ MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);
+
+ for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
+ ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
+
+ if (unlikely(0 != ret)) {
+ return -EFAULT;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+ phys += _MALI_OSK_MALI_PAGE_SIZE;
+
+ MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x , cpu virtual address: 0x%x! \n", phys, addr));
+ }
+ }
+ return ret;
+}
+
+u32 mali_mem_secure_release(mali_mem_backend *mem_bkend)
+{
+ struct mali_mem_secure *mem;
+ mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);
+
+ mem = &mem_bkend->secure_mem;
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ MALI_DEBUG_ASSERT_POINTER(mem->sgt);
+ /* Unmap the memory from the mali virtual address space. */
+ mali_mem_secure_mali_unmap(alloc);
+ mutex_lock(&mem_bkend->mutex);
+ dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(mem->buf, mem->attachment);
+ dma_buf_put(mem->buf);
+ mutex_unlock(&mem_bkend->mutex);
+
+ free_pages_nr = mem->count;
+
+ return free_pages_nr;
+}
+
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_secure.h b/drivers/gpu/arm/utgard/linux/mali_memory_secure.h
new file mode 100644
index 000000000000..48691d4790fe
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_secure.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013, 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_SECURE_H__
+#define __MALI_MEMORY_SECURE_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+#include <linux/spinlock.h>
+
+#include "mali_memory_types.h"
+
+_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd);
+
+_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+
+void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
+u32 mali_mem_secure_release(mali_mem_backend *mem_bkend);
+
+#endif /* __MALI_MEMORY_SECURE_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c
new file mode 100644
index 000000000000..4e88c702a68f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/shmem_fs.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_memory.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_ukk.h"
+#include "mali_kernel_utilization.h"
+#include "mali_memory_swap_alloc.h"
+
+
+static struct _mali_osk_bitmap idx_mgr;
+static struct file *global_swap_file;
+static struct address_space *global_swap_space;
+static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL;
+static u32 mem_backend_swapped_pool_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+static u32 mem_backend_swapped_unlock_size;
+#endif
+/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock.
+ * This lock is used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */
+static struct mutex mem_backend_swapped_pool_lock;
+static struct list_head mem_backend_swapped_pool;
+
+extern struct mali_mem_os_allocator mali_mem_os_allocator;
+
+#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024)
+#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0) /* Used to mark that the given memory cookie is invalid. */
+#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF)
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_SHIFT)
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory; the reserved space is 128MB. */
+
+unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE;
+
+/**
+ * There are two situations in which we shrink the swapped pool: when GPU utilization is low, which
+ * indicates the GPU will not touch the swappable backends in the near future, and when newly added
+ * swappable backends push the total pool size over the swapped-pool threshold value.
+ */
+typedef enum {
+ MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100,
+ MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257,
+} _mali_mem_swap_pool_shrink_type_t;
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg);
+
+_mali_osk_errcode_t mali_mem_swap_init(void)
+{
+ gfp_t flags = __GFP_NORETRY | __GFP_NOWARN;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE);
+ if (IS_ERR(global_swap_file)) {
+ _mali_osk_bitmap_term(&idx_mgr);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping;
+
+ mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL);
+ if (NULL == mali_mem_swap_out_workq) {
+ _mali_osk_bitmap_term(&idx_mgr);
+ fput(global_swap_file);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+ flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+ flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+ flags |= GFP_DMA;
+#else
+ /* arm64 utgard only works with memory below 4GB, but the kernel
+ * does not provide a method to allocate memory below 4GB
+ */
+ MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+ /* When we use shmem_read_mapping_page to allocate/swap-in, it will
+ * use these flags to allocate a new page if needed. */
+ mapping_set_gfp_mask(global_swap_space, flags);
+
+ mem_backend_swapped_pool_size = 0;
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size = 0;
+#endif
+ mutex_init(&mem_backend_swapped_pool_lock);
+ INIT_LIST_HEAD(&mem_backend_swapped_pool);
+
+ MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold vaule is %uM\n", mali_mem_swap_out_threshold_value >> 20));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_swap_term(void)
+{
+ _mali_osk_bitmap_term(&idx_mgr);
+
+ fput(global_swap_file);
+
+ _mali_osk_wq_delete_work(mali_mem_swap_out_workq);
+
+ MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool));
+ MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size);
+
+ return;
+}
+
+struct file *mali_mem_swap_get_global_swap_file(void)
+{
+ return global_swap_file;
+}
+
+/* Check whether the given swappable backend is in the swapped pool. */
+static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend)
+{
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+ return !list_empty(&mem_bkend->list);
+}
+
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend)
+{
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+ mutex_lock(&mem_backend_swapped_pool_lock);
+ mutex_lock(&mem_bkend->mutex);
+
+ if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+ mutex_unlock(&mem_bkend->mutex);
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+ }
+
+ MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+ list_del_init(&mem_bkend->list);
+
+ mutex_unlock(&mem_bkend->mutex);
+
+ mem_backend_swapped_pool_size -= mem_bkend->size;
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
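+/* Drop the driver's DMA mapping and page reference so the kernel may swap the page out;
+ * the page is marked dirty so that its contents reach the backing swap file. */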
+static void mali_mem_swap_out_page_node(mali_page_node *page_node)
+{
+ MALI_DEBUG_ASSERT(page_node);
+
+ dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ set_page_dirty(page_node->swap_it->page);
+ put_page(page_node->swap_it->page);
+}
+
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend)
+{
+ mali_page_node *m_page;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+ if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) {
+ return;
+ }
+
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ mali_mem_swap_out_page_node(m_page);
+ }
+
+ return;
+}
+
+static void mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node)
+{
+ mali_page_node *m_page;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ if (m_page == page_node) {
+ break;
+ }
+ mali_mem_swap_out_page_node(m_page);
+ }
+}
+
+static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type)
+{
+ mali_mem_backend *bkend, *tmp_bkend;
+ long system_free_size;
+ u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock));
+
+ if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) {
+ /**
+ * When system memory is very low and the amount of Mali locked swappable memory is below the
+ * threshold value, and at the same time GPU load is very low and high performance is not needed,
+ * we can unlock more swappable memory backends from the swapped backend pool.
+ */
+ gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION;
+ temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2);
+ } else {
+ /* When we add swappable memory backends to the swapped pool, the Mali driver must not hold
+ * too many swappable backends, but performance also has to be considered. Swapping out a
+ * memory backend is therefore a balance and should follow these conditions:
+ * 1. The total memory size in the global swapped backend pool exceeds the defined threshold value.
+ * 2. The system-level free memory size is below the defined threshold value.
+ * 3. Note that GPU utilization is not considered in this case.
+ */
+ gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS;
+ temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value;
+ }
+
+ /* Get system free pages number. */
+ system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+ last_gpu_utilization = _mali_ukk_utilization_gp_pp();
+
+ if ((last_gpu_utilization < gpu_utilization_threshold_value)
+ && (system_free_size < mali_mem_swap_out_threshold_value)
+ && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) {
+ list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) {
+ if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) {
+ break;
+ }
+
+ mutex_lock(&bkend->mutex);
+
+ /* check if backend is in use. */
+ if (0 < bkend->using_count) {
+ mutex_unlock(&bkend->mutex);
+ continue;
+ }
+
+ mali_mem_swap_unlock_single_mem_backend(bkend);
+ list_del_init(&bkend->list);
+ mem_backend_swapped_pool_size -= bkend->size;
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size += bkend->size;
+#endif
+ mutex_unlock(&bkend->mutex);
+ }
+ }
+
+ return;
+}
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mutex_lock(&mem_backend_swapped_pool_lock);
+
+ mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION);
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
+/**
+ * After a PP job finishes, we add all swappable memory backends used by the job to the tail of
+ * the global swapped pool. If the total size of swappable memory exceeds the threshold value, we
+ * also shrink the swapped pool, starting from the head of the list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend)
+{
+ mutex_lock(&mem_backend_swapped_pool_lock);
+ mutex_lock(&mem_bkend->mutex);
+
+ if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+ MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+ list_del_init(&mem_bkend->list);
+ list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+ mutex_unlock(&mem_bkend->mutex);
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+ }
+
+ list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+
+ mutex_unlock(&mem_bkend->mutex);
+ mem_backend_swapped_pool_size += mem_bkend->size;
+
+ mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS);
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+}
+
+
+u32 mali_mem_swap_idx_alloc(void)
+{
+ return _mali_osk_bitmap_alloc(&idx_mgr);
+}
+
+void mali_mem_swap_idx_free(u32 idx)
+{
+ _mali_osk_bitmap_free(&idx_mgr, idx);
+}
+
+static u32 mali_mem_swap_idx_range_alloc(u32 count)
+{
+ u32 index;
+
+ index = _mali_osk_bitmap_alloc_range(&idx_mgr, count);
+
+ return index;
+}
+
+static void mali_mem_swap_idx_range_free(u32 idx, int num)
+{
+ _mali_osk_bitmap_free_range(&idx_mgr, idx, num);
+}
+
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void)
+{
+ mali_swap_item *swap_item;
+
+ swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL);
+
+ if (NULL == swap_item) {
+ return NULL;
+ }
+
+ atomic_set(&swap_item->ref_count, 1);
+ swap_item->page = NULL;
+ atomic_add(1, &mali_mem_os_allocator.allocated_pages);
+
+ return swap_item;
+}
+
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item)
+{
+ struct inode *file_node;
+ long long start, end;
+
+ /* If this swap item is still shared, just drop the reference counter; the item is freed once the last reference is gone. */
+ if (0 == atomic_dec_return(&swap_item->ref_count)) {
+ file_node = global_swap_file->f_path.dentry->d_inode;
+ start = swap_item->idx;
+ start = start << 12;
+ end = start + PAGE_SIZE;
+
+ shmem_truncate_range(file_node, start, (end - 1));
+
+ mali_mem_swap_idx_free(swap_item->idx);
+
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+
+ kfree(swap_item);
+ }
+}
+
+/* Used to allocate new swap item for new memory allocation and cow page for write. */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void)
+{
+ struct mali_page_node *m_page;
+
+ m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+ if (NULL == m_page) {
+ return NULL;
+ }
+
+ m_page->swap_it = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == m_page->swap_it) {
+ kfree(m_page);
+ return NULL;
+ }
+
+ return m_page;
+}
+
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page)
+{
+
+ mali_mem_swap_free_swap_item(m_page->swap_it);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page)
+{
+ _mali_mem_swap_put_page_node(m_page);
+
+ kfree(m_page);
+
+ return;
+}
+
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(swap_mem);
+
+ list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+ /* Free the page node and release the swap item; if the ref count is 1,
+ * the swap item itself is also freed. */
+ list_del(&m_page->list);
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+
+ _mali_mem_swap_page_node_free(m_page);
+ }
+
+ return free_pages_nr;
+}
+
+static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(cow_mem);
+
+ list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+ /* Free the page node and release the swap item; if the ref count is 1,
+ * the swap item itself is also freed. */
+ list_del(&m_page->list);
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+
+ _mali_mem_swap_page_node_free(m_page);
+ }
+
+ return free_pages_nr;
+}
+
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+ mali_mem_allocation *alloc;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ if (is_mali_mapped) {
+ mali_mem_swap_mali_unmap(alloc);
+ }
+
+ mali_memory_swap_list_backend_delete(mem_bkend);
+
+ mutex_lock(&mem_bkend->mutex);
+ /* Make sure the given memory backend has been unlocked on the Mali side
+ * before freeing this memory block. */
+ mali_mem_swap_unlock_single_mem_backend(mem_bkend);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (MALI_MEM_SWAP == mem_bkend->type) {
+ free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem);
+ } else {
+ free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem);
+ }
+
+ return free_pages_nr;
+}
+
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node)
+{
+ MALI_DEBUG_ASSERT(NULL != page_node);
+
+ page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx);
+
+ if (IS_ERR(page_node->swap_it->page)) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx));
+ return MALI_FALSE;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ return MALI_TRUE;
+}
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx)
+{
+ size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE;
+ struct mali_page_node *m_page;
+ long system_free_size;
+ u32 i, index;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT(NULL != swap_mem);
+ MALI_DEBUG_ASSERT(NULL != bkend_idx);
+ MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE);
+
+ if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+ MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+ size,
+ atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+ mali_mem_os_allocator.allocation_limit));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ INIT_LIST_HEAD(&swap_mem->pages);
+ swap_mem->count = page_count;
+ index = mali_mem_swap_idx_range_alloc(page_count);
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) {
+ MALI_PRINT_ERROR(("Mali Swap: Failed to allocate continuous index for swappable Mali memory."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ m_page = _mali_mem_swap_page_node_allocate();
+
+ if (NULL == m_page) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node."));
+ swap_mem->count = i;
+
+ mali_mem_swap_free(swap_mem);
+ mali_mem_swap_idx_range_free(index + i, page_count - i);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ m_page->swap_it->idx = index + i;
+
+ ret = mali_mem_swap_in_page_node(m_page);
+
+ if (MALI_FALSE == ret) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Allocate new page from SHMEM file failed."));
+ _mali_mem_swap_page_node_free(m_page);
+ mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1);
+
+ swap_mem->count = i;
+ mali_mem_swap_free(swap_mem);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ list_add_tail(&m_page->list, &swap_mem->pages);
+ }
+
+ system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+
+ if ((system_free_size < mali_mem_swap_out_threshold_value)
+ && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2))
+ && mali_utilization_enabled()) {
+ _mali_osk_wq_schedule_work(mali_mem_swap_out_workq);
+ }
+
+ *bkend_idx = index;
+ return 0;
+}
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+
+/* Insert these pages from shmem to mali page table*/
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ dma_addr_t phys;
+ u32 virt = vaddr;
+ u32 prop = props;
+
+ list_for_each_entry(m_page, &swap_mem->pages, list) {
+ MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page);
+ phys = m_page->swap_it->dma_addr;
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
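+/* Swap in every swappable (or swap-cowed) backend referenced by the PP job's memory cookies
+ * and map it on the GPU. Cookies that cannot be swapped in are marked invalid and the job's
+ * swap_status is set accordingly. */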
+int mali_mem_swap_in_pages(struct mali_pp_job *job)
+{
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ struct mali_page_node *m_page;
+ mali_bool swap_in_success = MALI_TRUE;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (NULL == mali_vma_node) {
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ swap_in_success = MALI_FALSE;
+ MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+ continue;
+ }
+
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+ if (MALI_MEM_SWAP != mali_alloc->type &&
+ MALI_MEM_COW != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get backend memory & Map on GPU */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ /* We need not hold the backend's lock here; this is race safe. */
+ if ((MALI_MEM_COW == mem_bkend->type) &&
+ (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ continue;
+ }
+
+ mutex_lock(&mem_bkend->mutex);
+
+ /* When swap_in_success is MALI_FALSE, this job has a memory backend that could not be swapped in
+ * and will be aborted by the mali scheduler, so here we just mark as invalid those memory cookies
+ * which should not be swapped out when the job is deleted. */
+ if (MALI_FALSE == swap_in_success) {
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+ /* Before swapping in, check whether this memory backend has already been swapped in by the latest flushed jobs. */
+ ++mem_bkend->using_count;
+
+ if (1 < mem_bkend->using_count) {
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+ if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) {
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) {
+ /* Not enough memory to swap in the page, so release the pages that have already been
+ * swapped in and then mark this pp job as failed. */
+ mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page);
+ swap_in_success = MALI_FALSE;
+ break;
+ }
+ }
+
+ if (swap_in_success) {
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size -= mem_bkend->size;
+#endif
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties);
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ /* Remove the unlock flag from the mem backend flags to mark that this backend has been swapped in. */
+ mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN);
+ mutex_unlock(&mem_bkend->mutex);
+ } else {
+ --mem_bkend->using_count;
+ /* Mark that this backend is not swapped in; it need not be processed anymore. */
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ mutex_unlock(&mem_bkend->mutex);
+ }
+ }
+
+ job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL;
+
+ return _MALI_OSK_ERR_OK;
+}
+
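+/* For each memory cookie of the PP job, drop the job's reference on the swappable backend and,
+ * once no job is using a backend, add it to the global swapped pool so it can be unlocked later. */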
+int mali_mem_swap_out_pages(struct mali_pp_job *job)
+{
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) {
+ continue;
+ }
+
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+
+ if (NULL == mali_vma_node) {
+ MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+ continue;
+ }
+
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+ if (MALI_MEM_SWAP != mali_alloc->type &&
+ MALI_MEM_COW != mali_alloc->type) {
+ continue;
+ }
+
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ /* We need not hold the backend's lock here; this is race safe. */
+ if ((MALI_MEM_COW == mem_bkend->type) &&
+ (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ continue;
+ }
+
+ mutex_lock(&mem_bkend->mutex);
+
+ MALI_DEBUG_ASSERT(0 < mem_bkend->using_count);
+
+ /* Reducing the using_count of a mem backend means fewer pp jobs are using it; once the count
+ * reaches zero, no pp job is using it and it can be put on the swap out list. */
+ --mem_bkend->using_count;
+
+ if (0 < mem_bkend->using_count) {
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ mali_memory_swap_list_backend_add(mem_bkend);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
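+/* Page-fault helper: find the page node at the given page offset within the backend, read the
+ * page back in from the global swap file, flush it via a DMA map/unmap cycle and return it locked. */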
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+ struct mali_page_node *m_page, *found_node = NULL;
+ struct page *found_page;
+ mali_mem_swap *swap = NULL;
+ mali_mem_cow *cow = NULL;
+ dma_addr_t dma_addr;
+ u32 i = 0;
+
+ if (MALI_MEM_SWAP == mem_bkend->type) {
+ swap = &mem_bkend->swap_mem;
+ list_for_each_entry(m_page, &swap->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags));
+
+ cow = &mem_bkend->cow_mem;
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ }
+
+ if (NULL == found_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx);
+
+ if (!IS_ERR(found_page)) {
+ lock_page(found_page);
+ dma_addr = dma_map_page(&mali_platform_device->dev, found_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_page(&mali_platform_device->dev, dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ *pagep = found_page;
+ } else {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
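+/* COW on demand for a swap-backed COW backend: allocate a fresh swap-backed page, copy the
+ * original page's contents into it, replace the old node in the backend's page list and return
+ * the new page locked to the caller. */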
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+ struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL;
+ mali_mem_cow *cow = NULL;
+ u32 i = 0;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED));
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+ MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend));
+
+ cow = &mem_bkend->cow_mem;
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+
+ if (NULL == found_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ new_node = _mali_mem_swap_page_node_allocate();
+
+ if (NULL == new_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ new_node->swap_it->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n"));
+ kfree(new_node->swap_it);
+ kfree(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) {
+ _mali_mem_swap_page_node_free(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* swap in found node for copy in kernel. */
+ if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) {
+ mali_mem_swap_out_page_node(new_node);
+ _mali_mem_swap_page_node_free(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_mem_cow_copy_page(found_node, new_node);
+
+ list_replace(&found_node->list, &new_node->list);
+
+ if (1 != _mali_page_node_get_ref_count(found_node)) {
+ atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages);
+ if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) {
+ mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ mem_bkend->cow_mem.change_pages_nr++;
+ }
+
+ mali_mem_swap_out_page_node(found_node);
+ _mali_mem_swap_page_node_free(found_node);
+
+ /* When swapping in the new page node, dma_map_page() was called for this page; unmap it here. */
+ dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ lock_page(new_node->swap_it->page);
+
+ *pagep = new_node->swap_it->page;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size)
+{
+ *swap_pool_size = mem_backend_swapped_pool_size;
+ *unlock_size = mem_backend_swapped_unlock_size;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h
new file mode 100644
index 000000000000..5810960e204a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_SWAP_ALLOC_H__
+#define __MALI_MEMORY_SWAP_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include "mali_memory_types.h"
+#include "mali_pp_job.h"
+
+/**
+ * Initialize memory swapping module.
+ */
+_mali_osk_errcode_t mali_mem_swap_init(void);
+
+void mali_mem_swap_term(void);
+
+/**
+ * Return global share memory file to other modules.
+ */
+struct file *mali_mem_swap_get_global_swap_file(void);
+
+/**
+ * Unlock the given memory backend and pages in it could be swapped out by kernel.
+ */
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend);
+
+/**
+ * Remove the given memory backend from global swap list.
+ */
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend);
+
+/**
+ * Add the given memory backend to global swap list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend);
+
+/**
+ * Allocate 1 index from bitmap used as page index in global swap file.
+ */
+u32 mali_mem_swap_idx_alloc(void);
+
+void mali_mem_swap_idx_free(u32 idx);
+
+/**
+ * Allocate a new swap item without page index.
+ */
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void);
+
+/**
+ * Free a swap item, truncate the corresponding space in page cache and free index of page.
+ */
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item);
+
+/**
+ * Allocate a page node with swap item.
+ */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void);
+
+/**
+ * Release the given page node's swap item; when its reference count drops to zero, the swap item itself is freed.
+ */
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page);
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page);
+
+/**
+ * Free a swappable memory backend.
+ */
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem);
+
+/**
+ * Unmap and free.
+ */
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);
+
+/**
+ * Read in a page from the global swap file with the pre-allocated page index.
+ */
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node);
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx);
+
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc);
+
+/**
+ * When a pp job is created, we need to swap in all memory backends needed by the job.
+ */
+int mali_mem_swap_in_pages(struct mali_pp_job *job);
+
+/**
+ * Put all memory backends used by this pp job on the global swap list.
+ */
+int mali_mem_swap_out_pages(struct mali_pp_job *job);
+
+/**
+ * This is called from the page fault handler to handle CPU reads and writes.
+ */
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+/**
+ * Used to process cow on demand for swappable memory backend.
+ */
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size);
+#endif
+#endif /* __MALI_MEMORY_SWAP_ALLOC_H__ */
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_types.h b/drivers/gpu/arm/utgard/linux/mali_memory_types.h
new file mode 100644
index 000000000000..33db40929642
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_types.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_TYPES_H__
+#define __MALI_MEMORY_TYPES_H__
+
+#include <linux/mm.h>
+
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+typedef u32 mali_address_t;
+
+typedef enum mali_mem_type {
+ MALI_MEM_OS,
+ MALI_MEM_EXTERNAL,
+ MALI_MEM_SWAP,
+ MALI_MEM_DMA_BUF,
+ MALI_MEM_UMP,
+ MALI_MEM_BLOCK,
+ MALI_MEM_COW,
+ MALI_MEM_SECURE,
+ MALI_MEM_TYPE_MAX,
+} mali_mem_type;
+
+typedef struct mali_block_item {
+ /* For the block type, the block physical address is always page-size aligned,
+ * so the low 12 bits are used as a ref_count.
+ */
+ unsigned long phy_addr;
+} mali_block_item;
+
+/**
+ * idx is used to locate the given page in the address space of the swap file.
+ * ref_count marks how many memory backends are using this item.
+ */
+typedef struct mali_swap_item {
+ u32 idx;
+ atomic_t ref_count;
+ struct page *page;
+ dma_addr_t dma_addr;
+} mali_swap_item;
+
+typedef enum mali_page_node_type {
+ MALI_PAGE_NODE_OS,
+ MALI_PAGE_NODE_BLOCK,
+ MALI_PAGE_NODE_SWAP,
+} mali_page_node_type;
+
+typedef struct mali_page_node {
+ struct list_head list;
+ union {
+ struct page *page;
+ mali_block_item *blk_it; /*pointer to block item*/
+ mali_swap_item *swap_it;
+ };
+
+ u32 type;
+} mali_page_node;
+
+typedef struct mali_mem_os_mem {
+ struct list_head pages;
+ u32 count;
+} mali_mem_os_mem;
+
+typedef struct mali_mem_dma_buf {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct mali_dma_buf_attachment *attachment;
+#endif
+} mali_mem_dma_buf;
+
+typedef struct mali_mem_external {
+ dma_addr_t phys;
+ u32 size;
+} mali_mem_external;
+
+typedef struct mali_mem_ump {
+#if defined(CONFIG_MALI400_UMP)
+ ump_dd_handle handle;
+#endif
+} mali_mem_ump;
+
+typedef struct block_allocator_allocation {
+ /* The list will be released in reverse order */
+ struct block_info *last_allocated;
+ u32 mapping_length;
+ struct block_allocator *info;
+} block_allocator_allocation;
+
+typedef struct mali_mem_block_mem {
+ struct list_head pfns;
+ u32 count;
+} mali_mem_block_mem;
+
+typedef struct mali_mem_virt_mali_mapping {
+ mali_address_t addr; /* Virtual Mali address */
+ u32 properties; /* MMU Permissions + cache, must match MMU HW */
+} mali_mem_virt_mali_mapping;
+
+typedef struct mali_mem_virt_cpu_mapping {
+ void __user *addr;
+ struct vm_area_struct *vma;
+} mali_mem_virt_cpu_mapping;
+
+#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
+#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
+
+typedef struct mali_mm_node {
+	/* Mali GPU vaddr start; use u32 because the MMU only supports 32-bit addresses */
+ uint32_t start; /* GPU vaddr */
+ uint32_t size; /* GPU allocation virtual size */
+ unsigned allocated : 1;
+} mali_mm_node;
+
+typedef struct mali_vma_node {
+ struct mali_mm_node vm_node;
+ struct rb_node vm_rb;
+} mali_vma_node;
+
+
+typedef struct mali_mem_allocation {
+ MALI_DEBUG_CODE(u32 magic);
+ mali_mem_type type; /**< Type of memory */
+ u32 flags; /**< Flags for this allocation */
+
+ struct mali_session_data *session; /**< Pointer to session that owns the allocation */
+
+ mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+ mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+
+ /* add for new memory system */
+ struct mali_vma_node mali_vma_node;
+ u32 vsize; /* virtual size*/
+ u32 psize; /* physical backend memory size*/
+ struct list_head list;
+ s32 backend_handle; /* idr for mem_backend */
+ _mali_osk_atomic_t mem_alloc_refcount;
+} mali_mem_allocation;
+
+struct mali_mem_os_allocator {
+ spinlock_t pool_lock;
+ struct list_head pool_pages;
+ size_t pool_count;
+
+ atomic_t allocated_pages;
+ size_t allocation_limit;
+
+ struct shrinker shrinker;
+ struct delayed_work timed_shrinker;
+ struct workqueue_struct *wq;
+};
+
+/* COW backend memory type */
+typedef struct mali_mem_cow {
+	struct list_head pages;     /**< All pages for this COW backend allocation,
+	                                 including newly allocated pages for the modified range */
+ u32 count; /**< number of pages */
+ s32 change_pages_nr;
+} mali_mem_cow;
+
+typedef struct mali_mem_swap {
+ struct list_head pages;
+ u32 count;
+} mali_mem_swap;
+
+typedef struct mali_mem_secure {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+#endif
+ u32 count;
+} mali_mem_secure;
+
+#define MALI_MEM_BACKEND_FLAG_COWED                   (0x1)  /* COW has happened on this backend */
+#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE        (0x2)  /* This is a COW backend, mapped so the CPU is not allowed to write */
+#define MALI_MEM_BACKEND_FLAG_SWAP_COWED              (0x4)  /* The given backend is COWed from swappable memory. */
+/* This backend has not been swapped in by the Mali driver; before using it,
+ * we must swap it in and set up the corresponding page table. */
+#define MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN            (0x8)
+#define MALI_MEM_BACKEND_FLAG_NOT_BINDED              (0x1 << 5) /* This backend is not backed with physical memory, used for deferred bind */
+#define MALI_MEM_BACKEND_FLAG_BINDED                  (0x1 << 6) /* This backend is backed with physical memory, used for deferred bind */
+
+typedef struct mali_mem_backend {
+ mali_mem_type type; /**< Type of backend memory */
+ u32 flags; /**< Flags for this allocation */
+ u32 size;
+ /* Union selected by type. */
+ union {
+ mali_mem_os_mem os_mem; /**< MALI_MEM_OS */
+ mali_mem_external ext_mem; /**< MALI_MEM_EXTERNAL */
+ mali_mem_dma_buf dma_buf; /**< MALI_MEM_DMA_BUF */
+ mali_mem_ump ump_mem; /**< MALI_MEM_UMP */
+ mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+ mali_mem_cow cow_mem;
+ mali_mem_swap swap_mem;
+ mali_mem_secure secure_mem;
+ };
+ mali_mem_allocation *mali_allocation;
+ struct mutex mutex;
+ mali_mem_type cow_type;
+
+ struct list_head list; /**< Used to link swappable memory backend to the global swappable list */
+ int using_count; /**< Mark how many PP jobs are using this memory backend */
+	u32 start_idx;                  /**< If the corresponding vma of this backend is linear, this value will be used to set vma->vm_pgoff */
+} mali_mem_backend;
+
+#define MALI_MEM_FLAG_MALI_GUARD_PAGE (_MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+#define MALI_MEM_FLAG_DONT_CPU_MAP (1 << 1)
+#define MALI_MEM_FLAG_CAN_RESIZE (_MALI_MEMORY_ALLOCATE_RESIZEABLE)
+#endif /* __MALI_MEMORY_TYPES_H__ */
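As a reading aid (not part of the patch), the packing described for mali_block_item above, a page-aligned physical address with the reference count stored in the low 12 bits, can be pictured with two hypothetical helpers; the driver's real accessors live elsewhere:

/* Sketch only: illustrates the packing implied by the mali_block_item comment. */
static inline unsigned long example_block_phy(const mali_block_item *item)
{
	return item->phy_addr & ~0xFFFUL;   /* page-aligned physical address */
}

static inline unsigned long example_block_ref(const mali_block_item *item)
{
	return item->phy_addr & 0xFFFUL;    /* reference count in the low 12 bits */
}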
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_ump.c b/drivers/gpu/arm/utgard/linux/mali_memory_ump.c
new file mode 100644
index 000000000000..666d4b0fb1cd
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_ump.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory.h"
+#include "ump_kernel_interface.h"
+
+static int mali_mem_ump_map(mali_mem_backend *mem_backend)
+{
+ ump_dd_handle ump_mem;
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block *ump_blocks;
+ struct mali_page_directory *pagedir;
+ u32 offset = 0;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ ump_mem = mem_backend->ump_mem.handle;
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+ if (nr_blocks == 0) {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ return -EINVAL;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks);
+ if (NULL == ump_blocks) {
+ return -ENOMEM;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) {
+ _mali_osk_free(ump_blocks);
+ return -EFAULT;
+ }
+
+ pagedir = session->page_directory;
+
+ mali_session_memory_lock(session);
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
+
+ _mali_osk_free(ump_blocks);
+ mali_session_memory_unlock(session);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_blocks; ++i) {
+ u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+ MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+
+ mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
+ ump_blocks[i].size, MALI_MMU_FLAGS_DEFAULT);
+
+ offset += ump_blocks[i].size;
+ }
+
+ if (alloc->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
+
+ mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+
+ offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ mali_session_memory_unlock(session);
+ _mali_osk_free(ump_blocks);
+ return 0;
+}
+
+static void mali_mem_ump_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags)
+{
+ ump_dd_handle ump_mem;
+ int ret;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
+ if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+ if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+ mem_backend->ump_mem.handle = ump_mem;
+
+ ret = mali_mem_ump_map(mem_backend);
+ if (0 != ret) {
+ ump_dd_reference_release(ump_mem);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n"));
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend)
+{
+ ump_dd_handle ump_mem;
+ mali_mem_allocation *alloc;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+ ump_mem = mem_backend->ump_mem.handle;
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ mali_mem_ump_unmap(alloc);
+ ump_dd_reference_release(ump_mem);
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_ump.h b/drivers/gpu/arm/utgard/linux/mali_memory_ump.h
new file mode 100644
index 000000000000..c314c8dcbf1c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_ump.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_UMP_BUF_H__
+#define __MALI_MEMORY_UMP_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags);
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_UMP_BUF_H__ */
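A hedged usage sketch (not part of the patch) for the UMP bind/unbind pair declared above, assuming the caller already holds a backend of type MALI_MEM_UMP and a UMP secure id passed up from userspace:

/* Illustrative only: error handling trimmed; secure_id and flags come from
 * the userspace allocation request. */
static int example_bind_ump(mali_mem_allocation *alloc,
			    mali_mem_backend *backend,
			    u32 secure_id, u32 flags)
{
	int err = mali_mem_bind_ump_buf(alloc, backend, secure_id, flags);
	if (_MALI_OSK_ERR_OK != err)
		return err;

	/* ... the backend is now mapped for the GPU ... */

	mali_mem_unbind_ump_buf(backend);   /* unmaps and drops the UMP reference */
	return 0;
}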
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_util.c b/drivers/gpu/arm/utgard/linux/mali_memory_util.c
new file mode 100644
index 000000000000..8e13e923c3fb
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_util.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_secure.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_external.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+
+
+/**
+ * _mali_free_allocation_mem() - free a memory allocation
+ */
+static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc)
+{
+ mali_mem_backend *mem_bkend = NULL;
+ u32 free_pages_nr = 0;
+
+ struct mali_session_data *session = mali_alloc->session;
+ MALI_DEBUG_PRINT(4, (" _mali_free_allocation_mem, psize =0x%x! \n", mali_alloc->psize));
+ if (0 == mali_alloc->psize)
+ goto out;
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ switch (mem_bkend->type) {
+ case MALI_MEM_OS:
+ free_pages_nr = mali_mem_os_release(mem_bkend);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+ case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+ mali_mem_unbind_ump_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+ MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+#endif
+ break;
+ case MALI_MEM_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ mali_mem_unbind_dma_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+ MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+#endif
+ break;
+ case MALI_MEM_EXTERNAL:
+ mali_mem_unbind_ext_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+ break;
+
+ case MALI_MEM_BLOCK:
+ free_pages_nr = mali_mem_block_release(mem_bkend);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+
+ case MALI_MEM_COW:
+ if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+ free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+ } else {
+ free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE);
+ }
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+ case MALI_MEM_SWAP:
+ free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ atomic_sub(free_pages_nr, &session->mali_mem_array[mem_bkend->type]);
+ break;
+ case MALI_MEM_SECURE:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ free_pages_nr = mali_mem_secure_release(mem_bkend);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+#else
+ MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n"));
+#endif
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type));
+ break;
+ }
+
+	/* Remove the backend memory index */
+ mutex_lock(&mali_idr_mutex);
+ idr_remove(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ kfree(mem_bkend);
+out:
+ /* remove memory allocation */
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_alloc);
+ return free_pages_nr;
+}
+
+/**
+ * Drop a reference on the allocation and free it when the count reaches zero.
+ */
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc)
+{
+ u32 free_pages_nr = 0;
+ mali_mem_allocation *mali_alloc = *alloc;
+ *alloc = NULL;
+ if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) {
+ free_pages_nr = _mali_free_allocation_mem(mali_alloc);
+ }
+ return free_pages_nr;
+}
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc)
+{
+ _mali_osk_atomic_inc(&alloc->mem_alloc_refcount);
+}
+
+void mali_free_session_allocations(struct mali_session_data *session)
+{
+ struct mali_mem_allocation *entry, *next;
+
+ MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n"));
+
+ list_for_each_entry_safe(entry, next, &session->allocation_mgr.head, list) {
+ mali_allocation_unref(&entry);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_util.h b/drivers/gpu/arm/utgard/linux/mali_memory_util.h
new file mode 100644
index 000000000000..33ac99509740
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_util.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_UTIL_H__
+#define __MALI_MEMORY_UTIL_H__
+
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc);
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc);
+
+void mali_free_session_allocations(struct mali_session_data *session);
+
+#endif
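A short sketch (not part of the patch) of the reference-counting contract behind the helpers above: every mali_allocation_ref() must be balanced by a mali_allocation_unref(), and the final unref tears the allocation down:

/* Illustrative only: 'alloc' is an allocation owned by the current session. */
static void example_allocation_refcount(struct mali_mem_allocation *alloc)
{
	mali_allocation_ref(alloc);        /* take an extra reference */

	/* ... share the allocation with another user, e.g. a COW backend ... */

	mali_allocation_unref(&alloc);     /* balanced drop; the pointer is cleared,
					    * and the last unref frees the backend */
}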
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c
new file mode 100644
index 000000000000..0b31e3a23432
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+
+
+/**
+ * Internal helper to link a node into the rb-tree.
+ */
+static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct mali_vma_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb);
+}
+
+/**
+ * mali_vma_offset_add() - Add offset node to RB Tree
+ */
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ int ret = 0;
+ write_lock(&mgr->vm_lock);
+
+ if (node->vm_node.allocated) {
+ goto out;
+ }
+
+ _mali_vma_offset_add_rb(mgr, node);
+ /* set to allocated */
+ node->vm_node.allocated = 1;
+
+out:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+
+/**
+ * mali_vma_offset_remove() - Remove offset node from RB tree
+ */
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (node->vm_node.allocated) {
+ rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+ write_unlock(&mgr->vm_lock);
+}
+
+/**
+ * mali_vma_offset_search() - Search for a node in the RB tree
+ */
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+ unsigned long start, unsigned long pages)
+{
+ struct mali_vma_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+ read_lock(&mgr->vm_lock);
+
+ iter = mgr->allocation_mgr_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct mali_vma_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset <= start + pages)
+ best = NULL;
+ }
+ read_unlock(&mgr->vm_lock);
+
+ return best;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h
new file mode 100644
index 000000000000..fd03ed9f2bbb
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_GPU_VMEM_H__
+#define __MALI_GPU_VMEM_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+
+
+
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node);
+
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node);
+
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+ unsigned long start, unsigned long pages);
+
+#endif
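For reference, a minimal sketch (not part of the patch) of the GPU virtual-address bookkeeping flow exposed above; the manager and node are assumed to have been set up by the allocation manager:

/* Illustrative only. */
static int example_track_gpu_range(struct mali_allocation_manager *mgr,
				   struct mali_vma_node *node)
{
	struct mali_vma_node *found;

	if (0 != mali_vma_offset_add(mgr, node))
		return -EINVAL;

	/* Later, e.g. from a CPU page fault handler: map a faulting GPU
	 * address back to the tracked node that covers it. */
	found = mali_vma_offset_search(mgr, node->vm_node.start, 1);
	if (NULL != found) {
		/* 'found' covers the queried address */
	}

	/* When the allocation is torn down, unlink the node again. */
	mali_vma_offset_remove(mgr, node);
	return 0;
}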
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c b/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c
new file mode 100644
index 000000000000..5bc0e52ebe23
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom)
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom)
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+{
+ MALI_DEBUG_ASSERT_POINTER(atom);
+ atomic_set((atomic_t *)&atom->u.val, val);
+}
+
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
+{
+ MALI_IGNORE(atom);
+}
+
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val)
+{
+ return atomic_xchg((atomic_t *)&atom->u.val, val);
+}
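A brief sketch (not part of the patch) of the intended lifecycle for the atomic wrapper implemented above:

/* Illustrative only. */
static void example_osk_atomic(void)
{
	_mali_osk_atomic_t refs;

	_mali_osk_atomic_init(&refs, 1);          /* start with one reference */
	_mali_osk_atomic_inc(&refs);              /* now 2 */
	if (0 == _mali_osk_atomic_dec_return(&refs)) {
		/* last reference dropped -- not reached in this example */
	}
	_mali_osk_atomic_term(&refs);             /* no-op on Linux, kept for symmetry */
}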
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c b/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c
new file mode 100644
index 000000000000..fb9ccd2ad1e2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010, 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitmap.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
+#include "common/mali_kernel_common.h"
+#include "mali_osk_types.h"
+#include "mali_osk.h"
+
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap)
+{
+ u32 obj;
+
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+
+ obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->reserve);
+
+ if (obj < bitmap->max) {
+ set_bit(obj, bitmap->table);
+ } else {
+ obj = -1;
+ }
+
+ if (obj != -1)
+ --bitmap->avail;
+ _mali_osk_spinlock_unlock(bitmap->lock);
+
+ return obj;
+}
+
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_bitmap_free_range(bitmap, obj, 1);
+}
+
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt)
+{
+ u32 obj;
+
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ if (0 >= cnt) {
+ return -1;
+ }
+
+ if (1 == cnt) {
+ return _mali_osk_bitmap_alloc(bitmap);
+ }
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, 0);
+
+ if (obj >= bitmap->max) {
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->reserve, cnt, 0);
+ }
+
+ if (obj < bitmap->max) {
+ bitmap_set(bitmap->table, obj, cnt);
+
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->max) {
+ bitmap->last = bitmap->reserve;
+ }
+ } else {
+ obj = -1;
+ }
+
+ if (obj != -1) {
+ bitmap->avail -= cnt;
+ }
+
+ _mali_osk_spinlock_unlock(bitmap->lock);
+
+ return obj;
+}
+
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ return bitmap->avail;
+}
+
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+ bitmap_clear(bitmap->table, obj, cnt);
+ bitmap->last = min(bitmap->last, obj);
+
+ bitmap->avail += cnt;
+ _mali_osk_spinlock_unlock(bitmap->lock);
+}
+
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+ MALI_DEBUG_ASSERT(reserve <= num);
+
+ bitmap->reserve = reserve;
+ bitmap->last = reserve;
+ bitmap->max = num;
+ bitmap->avail = num - reserve;
+ bitmap->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+ if (!bitmap->lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+ sizeof(long), GFP_KERNEL);
+ if (!bitmap->table) {
+ _mali_osk_spinlock_term(bitmap->lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ if (NULL != bitmap->lock) {
+ _mali_osk_spinlock_term(bitmap->lock);
+ }
+
+ if (NULL != bitmap->table) {
+ kfree(bitmap->table);
+ }
+}
+
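A hedged sketch (not part of the patch) of the bitmap allocator's lifecycle as implemented above, reserving the first two IDs as an arbitrary example:

/* Illustrative only: the bitmap object is owned by the caller. */
static void example_osk_bitmap(struct _mali_osk_bitmap *bitmap)
{
	u32 id, range;

	if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(bitmap, 128, 2))
		return;

	id = _mali_osk_bitmap_alloc(bitmap);              /* one ID, -1 on failure */
	range = _mali_osk_bitmap_alloc_range(bitmap, 4);  /* four contiguous IDs */

	if ((u32)-1 != range)
		_mali_osk_bitmap_free_range(bitmap, range, 4);
	if ((u32)-1 != id)
		_mali_osk_bitmap_free(bitmap, id);

	_mali_osk_bitmap_term(bitmap);
}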
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_irq.c b/drivers/gpu/arm/utgard/linux/mali_osk_irq.c
new file mode 100644
index 000000000000..5c8b9ceab9ab
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_irq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+typedef struct _mali_osk_irq_t_struct {
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+} mali_osk_irq_object_t;
+
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id); /* , struct pt_regs *regs*/
+
+#if defined(DEBUG)
+
+struct test_interrupt_data {
+ _mali_osk_irq_ack_t ack_func;
+ void *probe_data;
+ mali_bool interrupt_received;
+ wait_queue_head_t wq;
+};
+
+static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) {
+ data->interrupt_received = MALI_TRUE;
+ wake_up(&data->wq);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static _mali_osk_errcode_t test_interrupt(u32 irqnum,
+ _mali_osk_irq_trigger_t trigger_func,
+ _mali_osk_irq_ack_t ack_func,
+ void *probe_data,
+ const char *description)
+{
+ unsigned long irq_flags = 0;
+ struct test_interrupt_data data = {
+ .ack_func = ack_func,
+ .probe_data = probe_data,
+ .interrupt_received = MALI_FALSE,
+ };
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ init_waitqueue_head(&data.wq);
+
+ trigger_func(probe_data);
+ wait_event_timeout(data.wq, data.interrupt_received, 100);
+
+ free_irq(irqnum, &data);
+
+ if (data.interrupt_received) {
+ MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description));
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum));
+ return _MALI_OSK_ERR_FAULT;
+ }
+}
+
+#endif /* defined(DEBUG) */
+
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description)
+{
+ mali_osk_irq_object_t *irq_object;
+ unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) {
+ return NULL;
+ }
+
+ if (-1 == irqnum) {
+ /* Probe for IRQ */
+ if ((NULL != trigger_func) && (NULL != ack_func)) {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(probe_data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(probe_data);
+ } while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ } else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum) {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->data = int_data;
+
+ if (-1 == irqnum) {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ /* Verify that the configured interrupt settings are working */
+ if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+ MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description));
+ kfree(irq_object);
+ return NULL;
+ }
+#endif
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ return irq_object;
+}
+
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+ free_irq(irq_object->irqnum, irq_object);
+ kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU receives the hardware IRQ from the Mali core, or from another device
+ * on the same IRQ channel. One of these handlers is registered for each Mali
+ * core, so when an interrupt arrives this function is called once per
+ * registered core. That means we only check one Mali core per call, and the
+ * core to check is identified by the \a dev_id argument.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero, and then schedule
+ * mali_core_irq_handler_bottom_half to run as a high-priority work queue job.
+ */
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id) /* , struct pt_regs *regs*/
+{
+ irqreturn_t ret = IRQ_NONE;
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
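A sketch (not part of the patch) of how a core driver might register its interrupt through the helper above; my_core_irq_handler(), my_core_trigger() and my_core_ack() are hypothetical per-core callbacks of the _mali_osk_irq_uhandler_t/_trigger_t/_ack_t types:

/* Illustrative only. */
static _mali_osk_irq_t *example_register_core_irq(void *core, u32 irqnum)
{
	_mali_osk_irq_t *irq;

	irq = _mali_osk_irq_init(irqnum,
				 my_core_irq_handler,   /* upper-half handler */
				 core,                  /* passed back as dev data */
				 my_core_trigger,       /* only used when probing */
				 my_core_ack,
				 core,
				 "mali_example_core");
	if (NULL == irq)
		return NULL;

	/* ... on shutdown: _mali_osk_irq_term(irq); ... */
	return irq;
}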
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_locks.c b/drivers/gpu/arm/utgard/linux/mali_osk_locks.c
new file mode 100644
index 000000000000..ed5f0b0da7cb
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_locks.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+ checker->orig_flags = flags;
+ checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+ checker->order = order;
+ checker->next = NULL;
+#endif
+}
+
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+ checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+ printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+ _mali_osk_get_tid(), checker);
+ dump_stack();
+ }
+ }
+#endif
+}
+
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ remove_lock_from_log(checker, _mali_osk_get_tid());
+ }
+#endif
+ checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To assure that lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following function will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that a lock that is taken has a higher order than the current highest-order lock a
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, the `add_lock_to_log_and_check' makes sure that taking
+ * the given lock is legal. It will follow the linked list to find the last
+ * lock taken by this thread. If the last lock's order was lower than the
+ * lock that is to be taken, it appends the new lock to the list and returns
+ *   true; if not, it returns false. This return value is assert()'ed on in
+ * _mali_osk_lock_wait().
+ */
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+static void dump_lock_tracking_list(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 1;
+
+ /* print list for debugging purposes */
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+ l = l->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+ printk(" NULL\n");
+}
+
+static int tracking_list_length(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 0;
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ l = l->next;
+ n++;
+ MALI_DEBUG_ASSERT(n < 100);
+ }
+ return n;
+}
+
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ mali_bool ret = MALI_FALSE;
+ _mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+ struct _mali_osk_lock_debug_s *highest_order_lock = (struct _mali_osk_lock_debug_s *)0xbeefbabe;
+ struct _mali_osk_lock_debug_s *l;
+ unsigned long local_lock_flag;
+ u32 len;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+
+ l = lock_lookup_list;
+ if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+ lock_lookup_list = lock;
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return MALI_TRUE;
+ } else {
+ /* Traverse the locks taken and find the lock of the highest order.
+ * Since several threads may hold locks, each lock's owner must be
+ * checked so that locks not owned by this thread can be ignored. */
+ for (;;) {
+ MALI_DEBUG_ASSERT_POINTER(l);
+ if (tid == l->owner && l->order >= highest_order_for_tid) {
+ highest_order_for_tid = l->order;
+ highest_order_lock = l;
+ }
+
+ if (NULL != l->next) {
+ l = l->next;
+ } else {
+ break;
+ }
+ }
+
+		l->next = lock;
+		lock->next = NULL;
+ }
+
+ /* We have now found the highest order lock currently held by this thread and can see if it is
+ * legal to take the requested lock. */
+ ret = highest_order_for_tid < lock->order;
+
+ if (!ret) {
+ printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+ lock->order, lock_order_to_string(lock->order),
+ highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
+ dump_lock_tracking_list();
+ }
+
+ if (len + 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return ret;
+}
+
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ struct _mali_osk_lock_debug_s *curr;
+ struct _mali_osk_lock_debug_s *prev = NULL;
+ unsigned long local_lock_flag;
+ u32 len;
+ u32 n = 0;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+ curr = lock_lookup_list;
+
+ if (NULL == curr) {
+ printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+ dump_lock_tracking_list();
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+
+
+ while (lock != curr) {
+ prev = curr;
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ curr = curr->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+
+ if (NULL == prev) {
+ lock_lookup_list = curr->next;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ MALI_DEBUG_ASSERT_POINTER(prev);
+ prev->next = curr->next;
+ }
+
+ lock->next = NULL;
+
+ if (len - 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+}
+
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order)
+{
+ switch (order) {
+ case _MALI_OSK_LOCK_ORDER_SESSIONS:
+ return "_MALI_OSK_LOCK_ORDER_SESSIONS";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_SESSION:
+ return "_MALI_OSK_LOCK_ORDER_MEM_SESSION";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_INFO:
+ return "_MALI_OSK_LOCK_ORDER_MEM_INFO";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE:
+ return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE";
+ break;
+ case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
+ return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+ return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
+ break;
+ case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+ return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+ break;
+ case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+ return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
+ break;
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
+ break;
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
+ break;
+ case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PROFILING:
+ return "_MALI_OSK_LOCK_ORDER_PROFILING";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2:
+ return "_MALI_OSK_LOCK_ORDER_L2";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+ break;
+ case _MALI_OSK_LOCK_ORDER_UTILIZATION:
+ return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
+ break;
+ case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
+ return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PM_STATE:
+ return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+ break;
+ default:
+ return "<UNKNOWN_LOCK_ORDER>";
+ }
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
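To make the checking rule above concrete (not part of the patch): within one thread, each newly taken ordered lock must have a strictly higher order than the highest-order lock already held, so the nesting below is legal while acquiring the locks in the opposite order would trip the checker:

/* Illustrative only, relevant to DEBUG + LOCK_ORDER_CHECKING builds;
 * 'low' was created with a lower _mali_osk_lock_order_t than 'high'. */
static void example_lock_nesting(_mali_osk_spinlock_t *low,
				 _mali_osk_spinlock_t *high)
{
	_mali_osk_spinlock_lock(low);
	_mali_osk_spinlock_lock(high);   /* legal: strictly higher order */
	_mali_osk_spinlock_unlock(high);
	_mali_osk_spinlock_unlock(low);
}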
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_locks.h b/drivers/gpu/arm/utgard/linux/mali_osk_locks.h
new file mode 100644
index 000000000000..6fd5af95285b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_locks.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef _cplusplus
+extern "C" {
+#endif
+
+ /* When DEBUG is enabled, this struct will be used to track owner, mode and order checking */
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s {
+ u32 owner;
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_order_t order;
+ struct _mali_osk_lock_debug_s *next;
+ };
+#endif
+
+	/* Abstraction of spinlock_t */
+ struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ spinlock_t spinlock;
+ };
+
+	/* Abstraction of spinlock_t plus a flags field used to store the register state saved before locking */
+ struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+
+ spinlock_t spinlock;
+ unsigned long flags;
+ };
+
+ /* Abstraction of rw_semaphore in OS */
+ struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+ _mali_osk_lock_mode_t mode;
+#endif
+
+ struct rw_semaphore rw_sema;
+ };
+
+ /* Mutex and mutex_interruptible functions share the same osk mutex struct */
+ struct _mali_osk_mutex_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ struct mutex mutex;
+ };
+
+#ifdef DEBUG
+	/** @brief The _mali_osk_locks_debug_init/add/remove() functions are declared when DEBUG is enabled and
+	 * defined in mali_osk_locks.c. When LOCK_ORDER_CHECKING is enabled, calling these functions whenever we
+	 * init/lock/unlock a lock or mutex lets us track the lock order for a given tid. */
+ void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+ void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+ void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+ /** @brief This function can return a given lock's owner when DEBUG is enabled. */
+ static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
+ {
+ return lock->owner;
+ }
+#else
+#define _mali_osk_locks_debug_init(x, y, z) do {} while (0)
+#define _mali_osk_locks_debug_add(x) do {} while (0)
+#define _mali_osk_locks_debug_remove(x) do {} while (0)
+#endif
+
+	/** @brief Before using _mali_osk_spinlock_lock(), this init function must be called to allocate memory and initialize the spinlock. */
+ static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
+ if (NULL == lock) {
+ return NULL;
+ }
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock a spinlock */
+ static inline void _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ spin_lock(&lock->spinlock);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock a spinlock */
+ static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock(&lock->spinlock);
+ }
+
+	/** @brief Free the memory block that the lock argument points to; its type must be
+	 * _mali_osk_spinlock_t *. */
+ static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+	/** @brief Before _mali_osk_spinlock_irq_lock/unlock/term() is called, this init function must be
+	 * called to initialize the spinlock and flags in struct _mali_osk_spinlock_irq_t. */
+ static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_irq_t *lock = NULL;
+ lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ lock->flags = 0;
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock spinlock and save the register's state */
+ static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock)
+ {
+ unsigned long tmp_flags;
+
+ BUG_ON(NULL == lock);
+ spin_lock_irqsave(&lock->spinlock, tmp_flags);
+ lock->flags = tmp_flags;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock spinlock with saved register's state */
+ static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+ }
+
+	/** @brief Free the memory block that the lock argument points to; its type must be
+	 * _mali_osk_spinlock_irq_t *. */
+ static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+	/** @brief Before _mali_osk_mutex_rw_wait/signal/term() is called, we should call
+	 * _mali_osk_mutex_rw_init() to kmalloc a memory block and initialize its members. */
+ static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_rw_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ init_rwsem(&lock->rw_sema);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+	/** @brief When calling the _mali_osk_mutex_rw_wait/signal() functions, the second argument, mode,
+	 * must be either _MALI_OSK_LOCKMODE_RO or _MALI_OSK_LOCKMODE_RW */
+ static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ down_read(&lock->rw_sema);
+ } else {
+ down_write(&lock->rw_sema);
+ }
+
+#ifdef DEBUG
+		lock->mode = mode;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+ }
+
+	/** @brief Release lock->rw_sema with up_read()/up_write() according to the mode argument's value. */
+ static inline void _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+ /* make sure the thread releasing the lock actually was the owner */
+ if (mode == _MALI_OSK_LOCKMODE_RW) {
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ /* This lock now has no owner */
+ lock->checker.owner = 0;
+ }
+#endif
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ up_read(&lock->rw_sema);
+ } else {
+ up_write(&lock->rw_sema);
+ }
+ }
+
+	/** @brief Free the memory block that the lock argument points to; its type must be
+	 * _mali_osk_mutex_rw_t *. */
+ static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Mutex & mutex_interruptible share the same init and term function, because they have the
+ * same osk mutex struct, and the difference between them is which locking function they use */
+ static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+ mutex_init(&lock->mutex);
+
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock the lock->mutex with mutex_lock_interruptible function */
+ static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+ {
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ BUG_ON(NULL == lock);
+
+ if (mutex_lock_interruptible(&lock->mutex)) {
+ printk(KERN_WARNING "Mali: Can not lock mutex\n");
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ return err;
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock_interruptible() function. */
+ static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+	/** @brief Lock lock->mutex with the plain mutex_lock() function, which cannot be interrupted. */
+ static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ mutex_lock(&lock->mutex);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */
+ static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+	/** @brief Free the memory block that the lock argument points to. */
+ static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+#ifdef _cplusplus
+}
+#endif
+
+#endif
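A short sketch (not part of the patch) of the read/write semaphore wrapper declared above, assuming the lock was created with _mali_osk_mutex_rw_init():

/* Illustrative only. */
static void example_rw_lock(_mali_osk_mutex_rw_t *lock)
{
	/* Multiple readers may hold the lock concurrently... */
	_mali_osk_mutex_rw_wait(lock, _MALI_OSK_LOCKMODE_RO);
	/* ... read shared state ... */
	_mali_osk_mutex_rw_signal(lock, _MALI_OSK_LOCKMODE_RO);

	/* ...while a writer needs exclusive access. */
	_mali_osk_mutex_rw_wait(lock, _MALI_OSK_LOCKMODE_RW);
	/* ... modify shared state ... */
	_mali_osk_mutex_rw_signal(lock, _MALI_OSK_LOCKMODE_RW);
}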
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c
new file mode 100644
index 000000000000..4113ecebdba7
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+void _mali_osk_mem_barrier(void)
+{
+ mb();
+}
+
+void _mali_osk_write_mem_barrier(void)
+{
+ wmb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
+{
+ iounmap((void *)virt);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
+{
+#if MALI_LICENSE_IS_GPL
+ return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
+{
+#if !MALI_LICENSE_IS_GPL
+ release_mem_region(phys, size);
+#endif
+}
+
+void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset);
+}
+
+u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
+{
+ return ioread32(((u8 *)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ iowrite32(val, ((u8 *)addr) + offset);
+}
+
+void _mali_osk_cache_flushall(void)
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size)
+{
+ _mali_osk_write_mem_barrier();
+}
+
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+ u32 retval = 0;
+ void *temp_buf;
+
+ temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+ if (NULL != temp_buf) {
+ u32 bytes_left_to_copy = size;
+ u32 i;
+ for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ u32 size_to_copy;
+ u32 size_copied;
+ u32 bytes_left;
+
+ if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+ } else {
+ size_to_copy = bytes_left_to_copy;
+ }
+
+ bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy);
+ size_copied = size_to_copy - bytes_left;
+
+ bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied);
+ size_copied -= bytes_left;
+
+ bytes_left_to_copy -= size_copied;
+ retval += size_copied;
+
+ if (size_copied != size_to_copy) {
+ break; /* Early out: we were not able to copy this entire block */
+ }
+ }
+
+ kfree(temp_buf);
+ }
+
+ return retval;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+ void __user *src;
+ void __user *dst;
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (NULL == session) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ src = (void __user *)(uintptr_t)args->src;
+ dst = (void __user *)(uintptr_t)args->dest;
+
+ /* Return number of bytes actually copied */
+ args->size = _mali_osk_mem_write_safe(dst, src, args->size);
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_mali.c b/drivers/gpu/arm/utgard/linux/mali_osk_mali.c
new file mode 100644
index 000000000000..89c5a5ee71af
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_mali.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+static mali_bool mali_secure_mode_enabled = MALI_FALSE;
+static mali_bool mali_secure_mode_supported = MALI_FALSE;
+
+/* Function that deinitializes the mali gpu secure mode */
+void (*mali_secure_mode_deinit)(void) = NULL;
+/* Function that resets the GPU and enables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_enable)(void) = NULL;
+/* Function that resets the GPU and disables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_disable)(void) = NULL;
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the max number of resources we could have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the max number of resources with interrupts; they are
+ * the first 20 elements in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * pp core start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_l2_LOCATION_END 22
+
+/**
+ * DMA unit location.
+ */
+#define MALI_OSK_RESOURCE_DMA_LOCATION 26
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+ {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+ {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+ {.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+ {.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+ {.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+ {.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+ {.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+ {.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+ {.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+ {.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+ {.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+ {.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+ {.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+ {.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+ {.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+ {.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+ {.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+ {.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
+ {.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+ {.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+ {.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+ {.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+ {.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+ {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+static int _mali_osk_get_compatible_name(const char **out_string)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ return of_property_read_string(node, "compatible", out_string);
+}
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+ mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE;
+ int i, pp_core_num = 0, l2_core_num = 0;
+ struct resource *res;
+ const char *compatible_name = NULL;
+
+ if (0 == _mali_osk_get_compatible_name(&compatible_name)) {
+ if (0 == strncmp(compatible_name, "arm,mali-450", strlen("arm,mali-450"))) {
+ mali_is_450 = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("mali-450 device tree detected."));
+ } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) {
+ mali_is_470 = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("mali-470 device tree detected."));
+ }
+ }
+
+ for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+ res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+ if (res) {
+ mali_osk_resource_bank[i].irq = res->start;
+ } else {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+ if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+ pp_core_num++;
+ }
+ }
+
+ /* We have to divide by 2, because each PP is counted twice (once for the PP core and once for its MMU). */
+ if (0 != pp_core_num % 2) {
+ MALI_DEBUG_PRINT(2, ("The value of pp core number isn't normal."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_core_num /= 2;
+
+ /**
+ * We can calculate the number of L2 cache cores from the number of PP cores
+ * and the device type (Mali-400/Mali-450/Mali-470).
+ */
+ l2_core_num = 1;
+ if (mali_is_450) {
+ if (pp_core_num > 4) {
+ l2_core_num = 3;
+ } else if (pp_core_num <= 4) {
+ l2_core_num = 2;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+
+ /* If the device is neither Mali-450 nor Mali-470, remove the related resources from the resource bank. */
+ if (!(mali_is_450 || mali_is_470)) {
+ for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ if (mali_is_470)
+ mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+
+ if (NULL == mali_platform_device) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ /* Traverse all resources in the resource bank to find the matching one. */
+ for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ if (mali_osk_resource_bank[i].base == addr) {
+ if (NULL != res) {
+ res->base = addr + _mali_osk_resource_base_address();
+ res->description = mali_osk_resource_bank[i].description;
+ res->irq = mali_osk_resource_bank[i].irq;
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ struct resource *reg_res = NULL;
+ uintptr_t ret = 0;
+
+ reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+
+ if (NULL != reg_res) {
+ ret = reg_res->start;
+ }
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ MALI_DEBUG_PRINT(2, ("Get pmu config from device tree configuration.\n"));
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (!of_get_property(node, "pmu_domain_config", &length)) {
+ return;
+ }
+
+ if (array_size != length / sizeof(u32)) {
+ MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+ return;
+ }
+
+ of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+ domain_config_array[i] = (u16)u;
+ i++;
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ u32 switch_delay;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+ return switch_delay;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_MALI_DT && !CONFIG_MALI_PLAT_SPECIFIC_DT */
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+ uintptr_t phys_addr;
+
+ if (NULL == mali_platform_device) {
+ /* Not connected to a device */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ phys_addr = addr + _mali_osk_resource_base_address();
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+ mali_platform_device->resource[i].start == phys_addr) {
+ if (NULL != res) {
+ res->base = phys_addr;
+ res->description = mali_platform_device->resource[i].name;
+
+ /* Any (optional) IRQ resource belonging to this resource will follow */
+ if ((i + 1) < mali_platform_device->num_resources &&
+ IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
+ res->irq = mali_platform_device->resource[i + 1].start;
+ } else {
+ res->irq = -1;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ uintptr_t lowest_addr = (uintptr_t)(0 - 1);
+ uintptr_t ret = 0;
+
+ if (NULL != mali_platform_device) {
+ int i;
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+ mali_platform_device->resource[i].start < lowest_addr) {
+ lowest_addr = mali_platform_device->resource[i].start;
+ ret = lowest_addr;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ _mali_osk_device_data data = { 0, };
+
+ MALI_DEBUG_PRINT(2, ("Get pmu config from platform device data.\n"));
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Copy the customer-specific power domain config */
+ _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_device_data data = { 0, };
+
+ err = _mali_osk_device_data_get(&data);
+
+ if (_MALI_OSK_ERR_OK == err) {
+ return data.pmu_switch_delay;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(data);
+
+ if (NULL != mali_platform_device) {
+ struct mali_gpu_device_data *os_data = NULL;
+
+ os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+ if (NULL != os_data) {
+ /* Copy data from the OS-dependent struct to the Mali-neutral struct (they are identical!) */
+ BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
+ _mali_osk_memcpy(data, os_data, sizeof(*os_data));
+
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
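+/*
+ * Identify the GPU variant from the optional resources that are present:
+ * a second L2 cache slice implies Mali-450, a DLBU without it implies
+ * Mali-470, otherwise the device is treated as a Mali-400.
+ */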
+u32 _mali_osk_identify_gpu_resource(void)
+{
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+ /* Mali 450 */
+ return 0x450;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL))
+ /* Mali 470 */
+ return 0x470;
+
+ /* Mali 400 */
+ return 0x400;
+}
+
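+/*
+ * Returns MALI_TRUE if two or more platform resources are wired to the
+ * same IRQ line (i.e. the interrupts are shared).
+ */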
+mali_bool _mali_osk_shared_interrupts(void)
+{
+ u32 irqs[128];
+ u32 i, j, irq, num_irqs_found = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) {
+ irq = mali_platform_device->resource[i].start;
+
+ for (j = 0; j < num_irqs_found; ++j) {
+ if (irq == irqs[j]) {
+ return MALI_TRUE;
+ }
+ }
+
+ irqs[num_irqs_found++] = irq;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void)
+{
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if ((NULL != data.secure_mode_init) && (NULL != data.secure_mode_deinit)
+ && (NULL != data.gpu_reset_and_secure_mode_enable) && (NULL != data.gpu_reset_and_secure_mode_disable)) {
+ int err = data.secure_mode_init();
+ if (err) {
+ MALI_DEBUG_PRINT(1, ("Failed to init gpu secure mode.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_secure_mode_deinit = data.secure_mode_deinit;
+ mali_gpu_reset_and_secure_mode_enable = data.gpu_reset_and_secure_mode_enable;
+ mali_gpu_reset_and_secure_mode_disable = data.gpu_reset_and_secure_mode_disable;
+
+ mali_secure_mode_supported = MALI_TRUE;
+ mali_secure_mode_enabled = MALI_FALSE;
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void)
+{
+ if (NULL != mali_secure_mode_deinit) {
+ mali_secure_mode_deinit();
+ mali_secure_mode_enabled = MALI_FALSE;
+ mali_secure_mode_supported = MALI_FALSE;
+ return _MALI_OSK_ERR_OK;
+ }
+ MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+
+}
+
+
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void)
+{
+ /* The mali executor lock must be held before entering this function. */
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_secure_mode_enabled);
+
+ if (NULL != mali_gpu_reset_and_secure_mode_enable) {
+ if (mali_gpu_reset_and_secure_mode_enable()) {
+ MALI_DEBUG_PRINT(1, ("Failed to reset GPU or enable gpu secure mode.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_secure_mode_enabled = MALI_TRUE;
+ return _MALI_OSK_ERR_OK;
+ }
+ MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void)
+{
+ /* The mali executor lock must be held before entering this function. */
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_secure_mode_enabled);
+
+ if (NULL != mali_gpu_reset_and_secure_mode_disable) {
+ if (mali_gpu_reset_and_secure_mode_disable()) {
+ MALI_DEBUG_PRINT(1, ("Failed to reset GPU or disable gpu secure mode.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_secure_mode_enabled = MALI_FALSE;
+
+ return _MALI_OSK_ERR_OK;
+
+ }
+ MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n"));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+
+}
+
+mali_bool _mali_osk_gpu_secure_mode_is_enabled(void)
+{
+ return mali_secure_mode_enabled;
+}
+
+mali_bool _mali_osk_gpu_secure_mode_is_supported(void)
+{
+ return mali_secure_mode_supported;
+}
+
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_math.c b/drivers/gpu/arm/utgard/linux/mali_osk_math.c
new file mode 100644
index 000000000000..0b2d00762771
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_math.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 _mali_osk_clz(u32 input)
+{
+ return 32 - fls(input);
+}
+
+u32 _mali_osk_fls(u32 input)
+{
+ return fls(input);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_memory.c b/drivers/gpu/arm/utgard/linux/mali_osk_memory.c
new file mode 100644
index 000000000000..174616b566c4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+void inline *_mali_osk_calloc(u32 n, u32 size)
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc(u32 size)
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free(void *ptr)
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_valloc(u32 size)
+{
+ return vmalloc(size);
+}
+
+void inline _mali_osk_vfree(void *ptr)
+{
+ vfree(ptr);
+}
+
+void inline *_mali_osk_memcpy(void *dst, const void *src, u32 len)
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset(void *s, u32 c, u32 n)
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated)
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_misc.c b/drivers/gpu/arm/utgard/linux/mali_osk_misc.c
new file mode 100644
index 000000000000..a1497c21a09b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_misc.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+#if !defined(CONFIG_MALI_QUIET)
+void _mali_osk_dbgmsg(const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+#endif /* !defined(CONFIG_MALI_QUIET) */
+
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
+{
+ int res;
+ va_list args;
+ va_start(args, fmt);
+
+ res = vscnprintf(buf, (size_t)size, fmt, args);
+
+ va_end(args);
+ return res;
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ dump_stack();
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+char *_mali_osk_get_comm(void)
+{
+ return (char *)current->comm;
+}
+
+
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ u32 tid = current->pid;
+
+ /* If the pid is 0 the core was idle. Instead of returning 0 we return a special number
+ * identifying which core we are on. */
+ if (0 == tid) {
+ tid = -(1 + raw_smp_processor_id());
+ }
+
+ return tid;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_notification.c b/drivers/gpu/arm/utgard/linux/mali_osk_notification.c
new file mode 100644
index 000000000000..19e12e38d522
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_notification.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted, a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct {
+ spinlock_t mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct {
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void)
+{
+ _mali_osk_notification_queue_t *result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ spin_lock_init(&result->mutex);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+ GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+ if (NULL == notification) {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size) {
+ notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ } else {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete(_mali_osk_notification_t *object)
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER(object);
+
+ notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue)
+{
+ _mali_osk_notification_t *result;
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
+ _mali_osk_notification_delete(result);
+ }
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_ASSERT_POINTER(object);
+
+ notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ if (!list_empty(&queue->head)) {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_ASSERT_POINTER(result);
+
+ /* default result */
+ *result = NULL;
+
+ if (wait_event_interruptible(queue->receive_queue,
+ _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_pm.c b/drivers/gpu/arm/utgard/linux/mali_osk_pm.c
new file mode 100644
index 000000000000..e28e2eb21fe2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_pm.c
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#include "mali_kernel_linux.h"
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Can NOT run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ int err;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ err = pm_runtime_get_sync(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+ if (0 > err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ int err;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ err = pm_runtime_get(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+ if (0 > err && -EINPROGRESS != err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/* Can run in atomic context */
+void _mali_osk_pm_dev_ref_put(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+ pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+ pm_runtime_put(&(mali_platform_device->dev));
+#endif
+#endif
+}
+
+void _mali_osk_pm_dev_barrier(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_barrier(&(mali_platform_device->dev));
+#endif
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c b/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c
new file mode 100644
index 000000000000..9e977ea4d0ff
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c
@@ -0,0 +1,1282 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+#include "mali_executor.h"
+#include "mali_memory_manager.h"
+
+#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100
+#define MALI_PROFILING_STREAM_HOLD_TIME 1000000 /*1 ms */
+
+#define MALI_PROFILING_STREAM_BUFFER_SIZE (1 << 12)
+#define MALI_PROFILING_STREAM_BUFFER_NUM 100
+
+/**
+ * Define the mali profiling stream struct.
+ */
+typedef struct mali_profiling_stream {
+ u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE];
+ u32 used_size;
+ struct list_head list;
+} mali_profiling_stream;
+
+typedef struct mali_profiling_stream_list {
+ spinlock_t spin_lock;
+ struct list_head free_list;
+ struct list_head queue_list;
+} mali_profiling_stream_list;
+
+static const char mali_name[] = "4xx";
+static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n";
+
+static u32 profiling_sample_rate = 0;
+static u32 first_sw_counter_index = 0;
+
+static mali_bool l2_cache_counter_if_enabled = MALI_FALSE;
+static u32 num_counters_enabled = 0;
+static u32 mem_counters_enabled = 0;
+
+static _mali_osk_atomic_t stream_fd_if_used;
+
+static wait_queue_head_t stream_fd_wait_queue;
+static mali_profiling_counter *global_mali_profiling_counters = NULL;
+static u32 num_global_mali_profiling_counters = 0;
+
+static mali_profiling_stream_list *global_mali_stream_list = NULL;
+static mali_profiling_stream *mali_counter_stream = NULL;
+static mali_profiling_stream *mali_core_activity_stream = NULL;
+static u64 mali_core_activity_stream_dequeue_time = 0;
+static spinlock_t mali_activity_lock;
+static u32 mali_activity_cores_num = 0;
+static struct hrtimer profiling_sampling_timer;
+
+const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS;
+const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS;
+
+static u32 current_profiling_pid = 0;
+
+static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list)
+{
+ mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+ MALI_DEBUG_ASSERT_POINTER(profiling_stream_list);
+
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) {
+ list_del(&profiling_stream->list);
+ kfree(profiling_stream);
+ }
+
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) {
+ list_del(&profiling_stream->list);
+ kfree(profiling_stream);
+ }
+
+ kfree(profiling_stream_list);
+}
+
+static void _mali_profiling_global_stream_list_free(void)
+{
+ mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+ unsigned long irq_flags;
+
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) {
+ profiling_stream->used_size = 0;
+ list_move(&profiling_stream->list, &global_mali_stream_list->free_list);
+ }
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream)
+{
+ unsigned long irq_flags;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+
+ if (!list_empty(stream_list)) {
+ *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list);
+ list_del_init(&(*new_mali_profiling_stream)->list);
+ } else {
+ ret = _MALI_OSK_ERR_NOMEM;
+ }
+
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+
+ return ret;
+}
+
+static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream)
+{
+ unsigned long irq_flags;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ list_add_tail(&current_mali_profiling_stream->list, stream_list);
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ return list_empty(&global_mali_stream_list->queue_list);
+}
+
+static u32 _mali_profiling_global_stream_queue_list_next_size(void)
+{
+ unsigned long irq_flags;
+ u32 size = 0;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ if (!list_empty(&global_mali_stream_list->queue_list)) {
+ mali_profiling_stream *next_mali_profiling_stream =
+ list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list);
+ size = next_mali_profiling_stream->used_size;
+ }
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+ return size;
+}
+
+/* The mali profiling stream file operations functions. */
+static ssize_t _mali_profiling_stream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait);
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations mali_profiling_stream_fops = {
+ .release = _mali_profiling_stream_release,
+ .read = _mali_profiling_stream_read,
+ .poll = _mali_profiling_stream_poll,
+};
+
+static ssize_t _mali_profiling_stream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ u32 copy_len = 0;
+ mali_profiling_stream *current_mali_profiling_stream;
+ u32 used_size;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ while (!_mali_profiling_global_stream_queue_list_if_empty()) {
+ used_size = _mali_profiling_global_stream_queue_list_next_size();
+ if (used_size <= ((u32)size - copy_len)) {
+ current_mali_profiling_stream = NULL;
+ _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list,
+ &current_mali_profiling_stream);
+ MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream);
+ if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) {
+ current_mali_profiling_stream->used_size = 0;
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+ return -EFAULT;
+ }
+ copy_len += current_mali_profiling_stream->used_size;
+ current_mali_profiling_stream->used_size = 0;
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+ } else {
+ break;
+ }
+ }
+ return (ssize_t)copy_len;
+}
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait)
+{
+ poll_wait(filp, &stream_fd_wait_queue, wait);
+ if (!_mali_profiling_global_stream_queue_list_if_empty())
+ return POLLIN;
+ return 0;
+}
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_atomic_init(&stream_fd_if_used, 0);
+ return 0;
+}
+
+/* Helper functions for control packets and stream data. */
+static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(size); ++i)
+ buf[i] = (size >> 8 * i) & 0xFF;
+}
+
+static u32 _mali_profiling_get_packet_size(unsigned char *const buf)
+{
+ u32 i;
+ u32 size = 0;
+ for (i = 0; i < sizeof(size); ++i)
+ size |= (u32)buf[i] << 8 * i;
+ return size;
+}
+
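+/*
+ * Packet payload integers use a signed LEB128-style varint encoding:
+ * 7 data bits per byte, bit 0x80 as the continuation flag and bit 0x40
+ * of the final byte as the sign bit. The helpers below decode and encode
+ * these varints.
+ */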
+static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size)
+{
+ u64 int_value = 0;
+ u8 shift = 0;
+ u8 byte_value = ~0;
+
+ while ((byte_value & 0x80) != 0) {
+ if ((*pos) >= packet_size)
+ return -1;
+ byte_value = buf[*pos];
+ *pos += 1;
+ int_value |= (u32)(byte_value & 0x7f) << shift;
+ shift += 7;
+ }
+
+ if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) {
+ int_value |= -(1 << shift);
+ }
+
+ return int_value;
+}
+
+static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value)
+{
+ u32 add_bytes = 0;
+ int more = 1;
+ while (more) {
+ /* low order 7 bits of val */
+ char byte_value = value & 0x7f;
+ value >>= 7;
+
+ if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) {
+ more = 0;
+ } else {
+ byte_value |= 0x80;
+ }
+
+ if ((pos + add_bytes) >= buf_size)
+ return 0;
+ buf[pos + add_bytes] = byte_value;
+ add_bytes++;
+ }
+
+ return add_bytes;
+}
+
+static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val)
+{
+ int add_bytes = 0;
+ int more = 1;
+ while (more) {
+ /* low order 7 bits of x */
+ char byte_value = val & 0x7f;
+ val >>= 7;
+
+ if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) {
+ more = 0;
+ } else {
+ byte_value |= 0x80;
+ }
+
+ MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+ buf[pos + add_bytes] = byte_value;
+ add_bytes++;
+ }
+
+ return add_bytes;
+}
+
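+/*
+ * Append one STREAM_HEADER_COUNTER_VALUE packet to the stream buffer:
+ * header byte, timestamp, core (always 0 here), counter key and counter
+ * value, then back-fill the payload size right after the header byte.
+ */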
+static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value)
+{
+ u32 add_size = STREAM_HEADER_SIZE;
+ MALI_DEBUG_ASSERT_POINTER(profiling_stream);
+ MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE);
+
+ profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE;
+
+ add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, current_time);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)0);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)key);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)counter_value);
+
+ _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1,
+ add_size - STREAM_HEADER_SIZE);
+
+ profiling_stream->used_size += add_size;
+}
+
+/* The callback function for the sampling timer. */
+static enum hrtimer_restart _mali_profiling_sampling_counters(struct hrtimer *timer)
+{
+ u32 counter_index;
+ s64 current_time;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters);
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ MALI_DEBUG_ASSERT(NULL == mali_counter_stream);
+ if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+ &global_mali_stream_list->free_list, &mali_counter_stream)) {
+
+ MALI_DEBUG_ASSERT_POINTER(mali_counter_stream);
+ MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size);
+
+ /* Capture l2 cache counter values if enabled */
+ if (MALI_TRUE == l2_cache_counter_if_enabled) {
+ int i, j = 0;
+ _mali_profiling_l2_counter_values l2_counters_values;
+ _mali_profiling_get_l2_counters(&l2_counters_values);
+
+ for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) {
+ if (0 == (j % 2))
+ _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0);
+ else
+ _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1);
+ j++;
+ }
+ }
+
+ current_time = (s64)_mali_osk_boot_time_get_ns();
+
+ /* Add all enabled counter values into stream */
+ for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) {
+ /* No need to sample these counters here. */
+ if (global_mali_profiling_counters[counter_index].enabled) {
+ if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) {
+
+ continue;
+ }
+
+ if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 &&
+ global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) {
+
+ u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value;
+
+ _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+ global_mali_profiling_counters[counter_index].current_counter_value - prev_val);
+
+ prev_val = global_mali_profiling_counters[counter_index].current_counter_value;
+
+ global_mali_profiling_counters[counter_index].prev_counter_value = prev_val;
+ } else {
+
+ if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) {
+ u32 total_alloc_mem = _mali_ukk_report_memory_usage();
+ global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+ global_mali_profiling_counters[counter_index].current_counter_value);
+ if (global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER)
+ global_mali_profiling_counters[counter_index].current_counter_value = 0;
+ }
+ }
+ }
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream);
+ mali_counter_stream = NULL;
+ } else {
+ MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+ }
+
+ wake_up_interruptible(&stream_fd_wait_queue);
+
+ /* Enable the sampling timer again */
+ if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+ hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate));
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
+
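+/*
+ * Record a GP/PP core going active (activity == 1) or idle (activity == 0)
+ * as a STREAM_HEADER_CORE_ACTIVITY packet. Packets are batched in
+ * mali_core_activity_stream and flushed to the queue list when the hold
+ * time expires, the buffer runs low, or all cores have gone idle.
+ */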
+static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&mali_activity_lock, irq_flags);
+ if (activity == 0)
+ mali_activity_cores_num--;
+ else
+ mali_activity_cores_num++;
+ spin_unlock_irqrestore(&mali_activity_lock, irq_flags);
+
+ if (NULL != global_mali_profiling_counters) {
+ int i ;
+ for (i = 0; i < num_global_mali_profiling_counters; i++) {
+ if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+ u64 current_time = _mali_osk_boot_time_get_ns();
+ u32 add_size = STREAM_HEADER_SIZE;
+
+ if (NULL != mali_core_activity_stream) {
+ if ((mali_core_activity_stream_dequeue_time + MALI_PROFILING_STREAM_HOLD_TIME < current_time) ||
+ (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE
+ - mali_core_activity_stream->used_size)) {
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+ mali_core_activity_stream = NULL;
+ wake_up_interruptible(&stream_fd_wait_queue);
+ }
+ }
+
+ if (NULL == mali_core_activity_stream) {
+ if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+ &global_mali_stream_list->free_list, &mali_core_activity_stream)) {
+ mali_core_activity_stream_dequeue_time = current_time;
+ } else {
+ MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+ wake_up_interruptible(&stream_fd_wait_queue);
+ break;
+ }
+
+ }
+
+ mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY;
+
+ add_size += _mali_profiling_pack_long(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid);
+
+ _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1,
+ add_size - STREAM_HEADER_SIZE);
+
+ mali_core_activity_stream->used_size += add_size;
+
+ if (0 == mali_activity_cores_num) {
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+ mali_core_activity_stream = NULL;
+ wake_up_interruptible(&stream_fd_wait_queue);
+ }
+
+ break;
+ }
+ }
+ }
+}
+
+static mali_bool _mali_profiling_global_counters_init(void)
+{
+ int core_id, counter_index, counter_number, counter_id;
+ u32 num_l2_cache_cores;
+ u32 num_pp_cores;
+ u32 num_gp_cores = 1;
+
+ MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters);
+ num_pp_cores = mali_pp_get_glob_num_pp_cores();
+ num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores
+ + MALI_PROFILING_SW_COUNTERS_NUM
+ + MALI_PROFILING_SPECIAL_COUNTERS_NUM
+ + MALI_PROFILING_MEM_COUNTERS_NUM;
+ global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter));
+
+ if (NULL == global_mali_profiling_counters)
+ return MALI_FALSE;
+
+ counter_index = 0;
+ /* Vertex processor counters */
+ for (core_id = 0; core_id < num_gp_cores; core_id ++) {
+ global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* Fragment processors' counters */
+ for (core_id = 0; core_id < num_pp_cores; core_id++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* L2 Cache counters */
+ for (core_id = 0; core_id < num_l2_cache_cores; core_id++) {
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* Now set up the software counter entries */
+ for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) {
+ counter_index++;
+
+ if (0 == first_sw_counter_index)
+ first_sw_counter_index = counter_index;
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER);
+ }
+
+ /* Now set up the special counter entries */
+ for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) {
+
+ counter_index++;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+ mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]);
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ }
+
+ /* Now set up the mem counter entries*/
+ for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) {
+
+ counter_index++;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+ mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]);
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ }
+
+ MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters);
+
+ return MALI_TRUE;
+}
+
+void _mali_profiling_notification_mem_counter(struct mali_session_data *session, u32 counter_id, u32 key, int enable)
+{
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL != session) {
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ queue = session->ioctl_queue;
+ MALI_DEBUG_ASSERT(NULL != queue);
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER,
+ sizeof(_mali_uk_annotate_profiling_mem_counter_s));
+
+ if (NULL != notification) {
+ _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer;
+ data->counter_id = counter_id;
+ data->key = key;
+ data->enable = enable;
+
+ _mali_osk_notification_queue_send(queue, notification);
+ } else {
+ MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+ }
+}
+
+void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL != session) {
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ queue = session->ioctl_queue;
+ MALI_DEBUG_ASSERT(NULL != queue);
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE,
+ sizeof(_mali_uk_annotate_profiling_enable_s));
+
+ if (NULL != notification) {
+ _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer;
+ data->sampling_rate = sampling_rate;
+ data->enable = enable;
+
+ _mali_osk_notification_queue_send(queue, notification);
+ } else {
+ MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+ }
+}
+
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ int i;
+ mali_profiling_stream *new_mali_profiling_stream = NULL;
+ mali_profiling_stream_list *new_mali_profiling_stream_list = NULL;
+ if (MALI_TRUE == auto_start) {
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ }
+
+ /* Init the global_mali_stream_list */
+ MALI_DEBUG_ASSERT(NULL == global_mali_stream_list);
+ new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL);
+
+ if (NULL == new_mali_profiling_stream_list) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ spin_lock_init(&new_mali_profiling_stream_list->spin_lock);
+ INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list);
+ INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list);
+
+ spin_lock_init(&mali_activity_lock);
+ mali_activity_cores_num = 0;
+
+ for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) {
+ new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL);
+ if (NULL == new_mali_profiling_stream) {
+ _mali_profiling_stream_list_destory(new_mali_profiling_stream_list);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ INIT_LIST_HEAD(&new_mali_profiling_stream->list);
+ new_mali_profiling_stream->used_size = 0;
+ list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list);
+
+ }
+
+ _mali_osk_atomic_init(&stream_fd_if_used, 0);
+ init_waitqueue_head(&stream_fd_wait_queue);
+
+ hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ profiling_sampling_timer.function = _mali_profiling_sampling_counters;
+
+ global_mali_stream_list = new_mali_profiling_stream_list;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+ if (0 != profiling_sample_rate) {
+ hrtimer_cancel(&profiling_sampling_timer);
+ profiling_sample_rate = 0;
+ }
+ _mali_osk_atomic_term(&stream_fd_if_used);
+
+ if (NULL != global_mali_profiling_counters) {
+ _mali_osk_free(global_mali_profiling_counters);
+ global_mali_profiling_counters = NULL;
+ num_global_mali_profiling_counters = 0;
+ }
+
+ if (NULL != global_mali_stream_list) {
+ _mali_profiling_stream_list_destory(global_mali_stream_list);
+ global_mali_stream_list = NULL;
+ }
+
+}
+
+void _mali_osk_profiling_stop_sampling(u32 pid)
+{
+ if (pid == current_profiling_pid) {
+
+ int i;
+ /* Reset all counter states when closing connection.*/
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+ global_mali_profiling_counters[i].enabled = 0;
+ global_mali_profiling_counters[i].prev_counter_value = 0;
+ global_mali_profiling_counters[i].current_counter_value = 0;
+ }
+ l2_cache_counter_if_enabled = MALI_FALSE;
+ num_counters_enabled = 0;
+ mem_counters_enabled = 0;
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+ _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+ /* Delete sampling timer when closing connection. */
+ if (0 != profiling_sample_rate) {
+ hrtimer_cancel(&profiling_sampling_timer);
+ profiling_sample_rate = 0;
+ }
+ current_profiling_pid = 0;
+ }
+}
+
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+	/* Record the freq & volt to global_mali_profiling_counters here. */
+ if (0 != profiling_sample_rate) {
+ u32 channel;
+ u32 state;
+ channel = (event_id >> 16) & 0xFF;
+ state = ((event_id >> 24) & 0xF) << 24;
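+		/* event_id bit layout, as implied by the masks above: bits 27..24 carry the
+		 * event type, bits 23..16 the channel, and bits 15..0 the reason code. */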
+
+ switch (state) {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) {
+ u32 reason = (event_id & 0xFFFF);
+ if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) {
+ _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0);
+ _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1);
+ }
+ }
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+ _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1);
+ } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+ (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+ u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+ _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1);
+ }
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+ _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0);
+ } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+ (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+ u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+ _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4);
+}
+
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+ trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value)
+{
+ if (NULL != global_mali_profiling_counters) {
+		int i;
+ for (i = 0; i < num_global_mali_profiling_counters; i++) {
+ if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+ global_mali_profiling_counters[i].current_counter_value = value;
+ break;
+ }
+ }
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+	/* Always add the process and thread identifiers in the first two data elements for events from user space */
+ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+ u32 *counters = (u32 *)(uintptr_t)args->counters;
+
+ _mali_osk_profiling_report_sw_counters(counters);
+
+ if (NULL != global_mali_profiling_counters) {
+ int i;
+		for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i++) {
+ if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) {
+ global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i);
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
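+	/* stream_fd_if_used acts as an open guard: only one profiling stream fd is handed
+	 * out at a time; a concurrent second request gets _MALI_OSK_ERR_BUSY below. */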
+ if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) {
+
+ s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops,
+ session,
+ O_RDONLY | O_CLOEXEC);
+
+ args->stream_fd = fd;
+ if (0 > fd) {
+ _mali_osk_atomic_dec(&stream_fd_if_used);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ _mali_osk_atomic_dec(&stream_fd_if_used);
+ args->stream_fd = -1;
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args)
+{
+ u32 control_packet_size;
+ u32 output_buffer_size;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) {
+ MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ control_packet_size = args->control_packet_size;
+ output_buffer_size = args->response_packet_size;
+
+ if (0 != control_packet_size) {
+ u8 control_type;
+ u8 *control_packet_data;
+ u8 *response_packet_data;
+ u32 version_length = sizeof(utgard_setup_version) - 1;
+
+ control_packet_data = (u8 *)(uintptr_t)args->control_packet_data;
+ MALI_DEBUG_ASSERT_POINTER(control_packet_data);
+ response_packet_data = (u8 *)(uintptr_t)args->response_packet_data;
+ MALI_DEBUG_ASSERT_POINTER(response_packet_data);
+
+		/* Decide whether to skip the Utgard setup version string. */
+ if (control_packet_size >= version_length) {
+ if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) {
+ if (control_packet_size == version_length) {
+ args->response_packet_size = 0;
+ return _MALI_OSK_ERR_OK;
+ } else {
+ control_packet_data += version_length;
+ control_packet_size -= version_length;
+ }
+ }
+ }
+
+ current_profiling_pid = _mali_osk_get_pid();
+
+ control_type = control_packet_data[0];
+ switch (control_type) {
+ case PACKET_HEADER_COUNTERS_REQUEST: {
+ int i;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+				MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Send supported counters */
+ if (PACKET_HEADER_SIZE > output_buffer_size)
+ return _MALI_OSK_ERR_FAULT;
+
+ *response_packet_data = PACKET_HEADER_COUNTERS_ACK;
+ args->response_packet_size = PACKET_HEADER_SIZE;
+
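+			/* Response layout, as built below: byte 0 holds PACKET_HEADER_COUNTERS_ACK, the packed
+			 * total size is written at offset 1, then each counter contributes its NUL-terminated
+			 * name followed by a packed core count (1 for VP activity, the PP core count for FP
+			 * activity, -1 otherwise). */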
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ u32 name_size = strlen(global_mali_profiling_counters[i].counter_name);
+
+ if ((args->response_packet_size + name_size + 1) > output_buffer_size) {
+					MALI_PRINT_ERROR(("Response packet data is too large.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ memcpy(response_packet_data + args->response_packet_size,
+ global_mali_profiling_counters[i].counter_name, name_size + 1);
+
+ args->response_packet_size += (name_size + 1);
+
+ if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32)1);
+ } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores());
+ } else {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32) - 1);
+ }
+ }
+
+ _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size);
+ break;
+ }
+
+ case PACKET_HEADER_COUNTERS_ENABLE: {
+ int i;
+ u32 request_pos = PACKET_HEADER_SIZE;
+ mali_bool sw_counter_if_enabled = MALI_FALSE;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+				MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+			/* Reset all counter states before enabling the requested counters. */
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+ global_mali_profiling_counters[i].enabled = 0;
+ global_mali_profiling_counters[i].prev_counter_value = 0;
+ global_mali_profiling_counters[i].current_counter_value = 0;
+
+ if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+ _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0);
+ }
+ }
+
+ l2_cache_counter_if_enabled = MALI_FALSE;
+ num_counters_enabled = 0;
+ mem_counters_enabled = 0;
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+ _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+ _mali_profiling_notification_enable(session, 0, 0);
+
+ /* Enable requested counters */
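+			/* Each request entry, as parsed below, is a NUL-terminated counter name followed
+			 * by a packed event value and a packed key. */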
+ while (request_pos < control_packet_size) {
+ u32 begin = request_pos;
+ u32 event;
+ u32 key;
+
+				/* Scan the counter name, which must be NUL-terminated */
+ while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') {
+ ++request_pos;
+ }
+
+ if (request_pos >= control_packet_size)
+ return _MALI_OSK_ERR_FAULT;
+
+ ++request_pos;
+ event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+ key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ u32 name_size = strlen((char *)(control_packet_data + begin));
+
+ if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) {
+ if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id
+ && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) {
+ sw_counter_if_enabled = MALI_TRUE;
+ _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+ }
+
+ if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) {
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+ _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff);
+ _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff);
+ }
+
+ if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+ _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id,
+ key, 1);
+ mem_counters_enabled++;
+ }
+
+ global_mali_profiling_counters[i].counter_event = event;
+ global_mali_profiling_counters[i].key = key;
+ global_mali_profiling_counters[i].enabled = 1;
+
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id,
+ global_mali_profiling_counters[i].counter_event);
+ num_counters_enabled++;
+ break;
+ }
+ }
+
+ if (i == num_global_mali_profiling_counters) {
+ MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (PACKET_HEADER_SIZE <= output_buffer_size) {
+ *response_packet_data = PACKET_HEADER_ACK;
+ _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+ args->response_packet_size = PACKET_HEADER_SIZE;
+ } else {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ break;
+ }
+
+ case PACKET_HEADER_START_CAPTURE_VALUE: {
+ u32 live_rate;
+ u32 request_pos = PACKET_HEADER_SIZE;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+				MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+			/* Read the sampling rate in nanoseconds and the live rate, then start the capture. */
+ profiling_sample_rate = _mali_profiling_read_packet_int(control_packet_data,
+ &request_pos, control_packet_size);
+
+ live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+ if (PACKET_HEADER_SIZE <= output_buffer_size) {
+ *response_packet_data = PACKET_HEADER_ACK;
+ _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+ args->response_packet_size = PACKET_HEADER_SIZE;
+ } else {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+ _mali_profiling_global_stream_list_free();
+ if (mem_counters_enabled > 0) {
+ _mali_profiling_notification_enable(session, profiling_sample_rate, 1);
+ }
+ hrtimer_start(&profiling_sampling_timer,
+ ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
+ break;
+ }
+ default:
+ MALI_PRINT_ERROR(("Unsupported profiling packet header type %u.\n", control_type));
+ args->response_packet_size = 0;
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ _mali_osk_profiling_stop_sampling(current_profiling_pid);
+ _mali_profiling_notification_enable(session, 0, 0);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+ if (COUNTER_VP_0_C0 == counter_id) {
+ mali_gp_job_set_gp_counter_src0(event_id);
+ } else if (COUNTER_VP_0_C1 == counter_id) {
+ mali_gp_job_set_gp_counter_src1(event_id);
+ } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) {
+ /*
+ * Two compatibility notes for this function:
+ *
+ * 1) Previously the DDK allowed per core counters.
+ *
+ * This did not make much sense on Mali-450 with the "virtual PP core" concept,
+ * so this option was removed, and only the same pair of HW counters was allowed on all cores,
+ * beginning with r3p2 release.
+ *
+ * Starting with r4p0, it is now possible to set different HW counters for the different sub jobs.
+ * This should be almost the same, since sub job 0 is designed to run on core 0,
+ * sub job 1 on core 1, and so on.
+ *
+ * The scheduling of PP sub jobs is not predictable, and this often led to situations where core 0 ran 2
+ * sub jobs, while for instance core 1 ran zero. Having the counters set per sub job would thus increase
+ * the predictability of the returned data (as you would be guaranteed data for all the selected HW counters).
+ *
+ * PS: Core scaling needs to be disabled in order to use this reliably (goes for both solutions).
+ *
+		 * The framework/#defines with Gator still indicate that the counter is for a particular core,
+ * but this is internally used as a sub job ID instead (no translation needed).
+ *
+ * 2) Global/default vs per sub job counters
+ *
+ * Releases before r3p2 had only per PP core counters.
+ * r3p2 releases had only one set of default/global counters which applied to all PP cores
+ * Starting with r4p0, we have both a set of default/global counters,
+ * and individual counters per sub job (equal to per core).
+ *
+ * To keep compatibility with Gator/DS-5/streamline, the following scheme is used:
+ *
+		 * r3p2 release: only counters set for core 0 are handled;
+		 * these are applied as the default/global set of counters, and will thus affect all cores.
+		 *
+		 * r4p0 release: counters set for core 0 are applied as both the global/default set of counters
+		 * and the counters for sub job 0.
+		 * Counters set for cores 1-7 are only applied to the corresponding sub job.
+ *
+ * This should allow the DS-5/Streamline GUI to have a simple mode where it only allows setting the
+ * values for core 0, and thus this will be applied to all PP sub jobs/cores.
+ * Advanced mode will also be supported, where individual pairs of HW counters can be selected.
+ *
+ * The GUI will (until it is updated) still refer to cores instead of sub jobs, but this is probably
+ * something we can live with!
+ *
+ * Mali-450 note: Each job is not divided into a deterministic number of sub jobs, as the HW DLBU
+ * automatically distributes the load between whatever number of cores is available at this particular time.
+ * A normal PP job on Mali-450 is thus considered a single (virtual) job, and it will thus only be possible
+ * to use a single pair of HW counters (even if the job ran on multiple PP cores).
+ * In other words, only the global/default pair of PP HW counters will be used for normal Mali-450 jobs.
+ */
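+		/* Worked example, assuming the COUNTER_FP_n_Cm IDs are contiguous as the range check
+		 * above implies: COUNTER_FP_3_C1 yields sub_job 3 and counter_src 1 below. */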
+ u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1;
+ u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1;
+ if (0 == counter_src) {
+ mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id);
+ if (0 == sub_job) {
+ mali_pp_job_set_pp_counter_global_src0(event_id);
+ }
+ } else {
+ mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id);
+ if (0 == sub_job) {
+ mali_pp_job_set_pp_counter_global_src1(event_id);
+ }
+ }
+ } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
+ u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
+ struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+ if (NULL != l2_cache_core) {
+ u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
+ mali_l2_cache_core_set_counter_src(l2_cache_core,
+ counter_src, event_id);
+ l2_cache_counter_if_enabled = MALI_TRUE;
+ }
+ } else {
+ return 0; /* Failure, unknown event */
+ }
+
+ return 1; /* success */
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ * the counter sources and values will be output
+ * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores
+ */
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
+{
+ u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
+ u32 i;
+
+ MALI_DEBUG_ASSERT(l2_cores_num <= 3);
+
+ for (i = 0; i < l2_cores_num; i++) {
+ struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+
+ if (NULL == l2_cache) {
+ continue;
+ }
+
+ mali_l2_cache_core_get_counter_values(l2_cache,
+ &values->cores[i].source0,
+ &values->cores[i].value0,
+ &values->cores[i].source1,
+ &values->cores[i].value1);
+ }
+
+ return 0;
+}
+
+/**
+ * Called by gator to control the production of profiling information at runtime.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+ switch (action) {
+ case FBDUMP_CONTROL_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+ break;
+ case FBDUMP_CONTROL_RATE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+ break;
+ case SW_COUNTER_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+ break;
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+ break;
+ default:
+ break; /* Ignore unimplemented actions */
+ }
+}
+
+/**
+ * Called by gator to get mali api version.
+ */
+u32 _mali_profiling_get_api_version(void)
+{
+ return MALI_PROFILING_API_VERSION;
+}
+
+/**
+ * Called by gator to get data about the Mali instance in use:
+ * product id, version, number of cores.
+ */
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+ values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+ values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+ values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+ values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ values->num_of_fp_cores = mali_executor_get_num_cores_total();
+ values->num_of_vp_cores = 1;
+}
+
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_specific.h b/drivers/gpu/arm/utgard/linux/mali_osk_specific.h
new file mode 100644
index 000000000000..19e33f236359
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_specific.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool *mali_dma_pool;
+
+typedef u32 mali_dma_addr;
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current CPU.
+   It is useful for low-cost (2 cycle) measurement of the time spent
+   in a code path: sample before and after, then diff the cycle counts.
+   The counter does not advance while the CPU is idle, so it is
+   accurate if only spin-locks are used, but mutexes may lead to
+   values that are too low since the CPU might "idle" while waiting
+   for the mutex to become available.
+   The clock source is configured on the CPU during Mali module load,
+   but will not give useful output after a CPU has been power cycled.
+   It is therefore important to configure the system not to turn off
+   the CPU cores when using this functionality. */
+static inline unsigned int mali_get_cpu_cyclecount(void)
+{
+ unsigned int value;
+ /* Reading the CCNT Register - CPU clock counter */
+ asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+ return value;
+}
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
+
+
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+ return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
+{
+ return in_atomic();
+}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_time.c b/drivers/gpu/arm/utgard/linux/mali_osk_time.c
new file mode 100644
index 000000000000..34481e3bad20
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_time.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
+{
+ return time_after_eq(ticka, tickb) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+unsigned long _mali_osk_time_mstoticks(u32 ms)
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+unsigned long _mali_osk_time_tickcount(void)
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay(u32 usecs)
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns(void)
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+ struct timespec tsval;
+ get_monotonic_boottime(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_timers.c b/drivers/gpu/arm/utgard/linux/mali_osk_timers.c
new file mode 100644
index 000000000000..7e592b7e6225
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_timers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct {
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = jiffies + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+void _mali_osk_timer_del(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer(&(tim->timer));
+}
+
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ return 1 == timer_pending(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c
new file mode 100644
index 000000000000..fa12abd3f5dc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct {
+ wait_queue_head_t wait_queue;
+};
+
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
+{
+ _mali_osk_wait_queue_t *ret = NULL;
+
+ ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+ if (NULL == ret) {
+ return ret;
+ }
+
+ init_waitqueue_head(&ret->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+ return ret;
+}
+
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition(data));
+}
+
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return;
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_wq.c b/drivers/gpu/arm/utgard/linux/mali_osk_wq.c
new file mode 100644
index 000000000000..d5e258a83a29
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_wq.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+typedef struct _mali_osk_wq_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ mali_bool high_pri;
+ struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+typedef struct _mali_osk_wq_delayed_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ struct delayed_work work;
+} mali_osk_wq_delayed_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *mali_wq_normal = NULL;
+static struct workqueue_struct *mali_wq_high = NULL;
+#endif
+
+static void _mali_osk_wq_work_func(struct work_struct *work);
+
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL == mali_wq_high);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
+ mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0);
+#else
+ mali_wq_normal = create_workqueue("mali");
+ mali_wq_high = create_workqueue("mali_high_pri");
+#endif
+ if (NULL == mali_wq_normal || NULL == mali_wq_high) {
+ MALI_PRINT_ERROR(("Unable to create Mali workqueues\n"));
+
+ if (mali_wq_normal) destroy_workqueue(mali_wq_normal);
+ if (mali_wq_high) destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* MALI_LICENSE_IS_GPL */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+ flush_workqueue(mali_wq_high);
+ flush_workqueue(mali_wq_normal);
+#else
+ flush_scheduled_work();
+#endif
+}
+
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL != mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL != mali_wq_high);
+
+ flush_workqueue(mali_wq_normal);
+ destroy_workqueue(mali_wq_normal);
+
+ flush_workqueue(mali_wq_high);
+ destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+#else
+ flush_scheduled_work();
+#endif
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_FALSE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_TRUE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ _mali_osk_wq_flush();
+ kfree(work_object);
+}
+
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ kfree(work_object);
+}
+
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_high, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+static void _mali_osk_wq_work_func(struct work_struct *work)
+{
+ mali_osk_wq_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+#if MALI_LICENSE_IS_GPL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+	/* We want the highest dynamic priority for this thread so that the jobs depending
+	** on it can be scheduled in time. Without this, the thread might sometimes have to
+	** wait for user-mode threads to finish their round-robin time slice, causing a
+	** *bubble* in the Mali pipeline. Thanks to the high-priority workqueue
+	** implementation in newer kernels, this is only needed on older kernels.
+	*/
+ if (MALI_TRUE == work_object->high_pri) {
+ set_user_nice(current, -19);
+ }
+#endif
+#endif /* MALI_LICENSE_IS_GPL */
+
+ work_object->handler(work_object->data);
+}
+
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+ work_object->handler(work_object->data);
+}
+
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+
+ INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+ return work;
+}
+
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ kfree(work_object);
+}
+
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work_sync(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+ queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+ schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c
new file mode 100644
index 000000000000..931d7f07a1d2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c
@@ -0,0 +1,23 @@
+/**
+ * Copyright (C) 2010, 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/module.h>
+#include "mali_executor.h"
+
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+ return mali_executor_set_perf_level(num_cores, MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_events.h b/drivers/gpu/arm/utgard/linux/mali_profiling_events.h
new file mode 100644
index 000000000000..4661cac42b3f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_events.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h b/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h
new file mode 100644
index 000000000000..6fdaa427c4cf
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c
new file mode 100644
index 000000000000..c3a526f0ad90
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+typedef struct mali_profiling_entry {
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+typedef enum mali_profiling_state {
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_mutex_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry *profile_entries = NULL;
+static _mali_osk_atomic_t profile_insert_index;
+static u32 profile_mask = 0;
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+ int d2, unsigned int d3, unsigned int d4))
+{
+ add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING);
+ if (NULL == lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_internal_profiling_term(void)
+{
+ u32 count;
+
+ /* Ensure profiling is stopped */
+ _mali_internal_profiling_stop(&count);
+
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock) {
+ _mali_osk_mutex_term(lock);
+ lock = NULL;
+ }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit)
+{
+ _mali_osk_errcode_t ret;
+ mali_profiling_entry *new_profile_entries;
+
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING == prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+ if (NULL == new_profile_entries) {
+ _mali_osk_mutex_signal(lock);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
+ *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ profile_mask = 1;
+ while (profile_mask <= *limit) {
+ profile_mask <<= 1;
+ }
+ profile_mask >>= 1;
+
+ *limit = profile_mask;
+
+ profile_mask--; /* turns the power of two into a mask of one less */
+
+ if (MALI_PROFILING_STATE_IDLE != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ profile_entries = new_profile_entries;
+
+ ret = _mali_timestamp_reset();
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ } else {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+ return ret;
+}
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ /* If event is "leave API function", add current memory usage to the event
+ * as data point 4. This is used in timeline profiling to indicate how
+ * much memory was used when leaving a function. */
+ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+ profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+ }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+	/* go into return state (user can retrieve events); no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+
+ tracepoint_synchronize_unregister();
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+ if (*count > profile_mask) *count = profile_mask;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_internal_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_mutex_wait(lock);
+ if (MALI_PROFILING_STATE_RETURN == prof_state) {
+ retval = _mali_osk_atomic_read(&profile_insert_index);
+ if (retval > profile_mask) retval = profile_mask;
+ }
+ _mali_osk_mutex_signal(lock);
+
+ return retval;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
+{
+ u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+ _mali_osk_mutex_wait(lock);
+
+ if (index < profile_mask) {
+ if ((raw_index & ~profile_mask) != 0) {
+ index += raw_index;
+ index &= profile_mask;
+ }
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= raw_index) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+ } else {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RETURN != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h
new file mode 100644
index 000000000000..f17b4583307a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count);
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_sync.c b/drivers/gpu/arm/utgard/linux/mali_sync.c
new file mode 100644
index 000000000000..5fab97486ea2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_sync.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_sync.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_timeline.h"
+#include "mali_executor.h"
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+struct mali_sync_pt {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_pt sync_pt;
+#else
+ struct mali_internal_sync_point sync_pt;
+#endif
+ struct mali_sync_flag *flag;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */
+#else
+ struct mali_internal_sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */
+#endif
+};
+
+/**
+ * The sync flag is used to connect sync fences to the Mali Timeline system. Sync fences can be
+ * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled.
+ */
+struct mali_sync_flag {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_timeline *sync_tl; /**< Sync timeline this flag is connected to. */
+#else
+ struct mali_internal_sync_timeline *sync_tl; /**< Sync timeline this flag is connected to. */
+#endif
+ u32 point; /**< Point on timeline. */
+ int status; /**< 0 if unsignaled, 1 if signaled without error or negative if signaled with error. */
+ struct kref refcount; /**< Reference count. */
+};
+
+/**
+ * The Mali sync timeline is used to connect the Mali timeline to sync_timeline.
+ * When a fence times out, this allows more detailed Mali timeline system info to be printed.
+ */
+struct mali_sync_timeline_container {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_timeline sync_timeline;
+#else
+ struct mali_internal_sync_timeline sync_timeline;
+#endif
+ struct mali_timeline *timeline;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+#else
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct mali_internal_sync_point *pt)
+#endif
+{
+ return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+#else
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct mali_internal_sync_timeline *sync_tl)
+#endif
+{
+ return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static int timeline_has_signaled(struct sync_pt *pt)
+#else
+static int timeline_has_signaled(struct mali_internal_sync_point *pt)
+#endif
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+ return mpt->flag->status;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_free_pt(struct sync_pt *pt)
+#else
+static void timeline_free_pt(struct mali_internal_sync_point *pt)
+#endif
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ mali_sync_flag_put(mpt->flag);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_release(struct sync_timeline *sync_timeline)
+#else
+static void timeline_release(struct mali_internal_sync_timeline *sync_timeline)
+#endif
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+	/* An always-signaled timeline does not have a Mali timeline container */
+ if (mali_tl) {
+ if (NULL != mali_tl->spinlock) {
+ mali_spinlock_reentrant_term(mali_tl->spinlock);
+ }
+ _mali_osk_free(mali_tl);
+ }
+
+ module_put(THIS_MODULE);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt, *new_mpt;
+ struct sync_pt *new_pt;
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == new_pt) return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+
+ mali_sync_flag_get(mpt->flag);
+ new_mpt->flag = mpt->flag;
+ new_mpt->sync_tl = mpt->sync_tl;
+
+ return new_pt;
+}
+
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+ struct mali_sync_pt *mpta;
+ struct mali_sync_pt *mptb;
+ u32 a, b;
+
+ MALI_DEBUG_ASSERT_POINTER(pta);
+ MALI_DEBUG_ASSERT_POINTER(ptb);
+ mpta = to_mali_sync_pt(pta);
+ mptb = to_mali_sync_pt(ptb);
+
+ MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+ MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+ a = mpta->flag->point;
+ b = mptb->flag->point;
+
+ if (a == b) return 0;
+
+ return ((b - a) < (a - b) ? -1 : 1);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(s);
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ mpt = to_mali_sync_pt(sync_pt);
+
+ /* It is possible this sync point is still under construction,
+ * so make sure the flag is valid before accessing it.
+ */
+ if (mpt->flag) {
+ seq_printf(s, "%u", mpt->flag->point);
+ } else {
+ seq_printf(s, "uninitialized");
+ }
+}
+
+static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ seq_printf(s, "oldest (%u) ", mali_tl->point_oldest);
+ seq_printf(s, "next (%u)", mali_tl->point_next);
+ seq_printf(s, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system = mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_print_timeline(mali_tl, s);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(str);
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ /* It is possible this sync point is still under construction,
+ * so make sure the flag is valid before accessing it.
+ */
+ if (mpt->flag) {
+ _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+ } else {
+ _mali_osk_snprintf(str, size, "uninitialized");
+ }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ _mali_osk_snprintf(str, size, "oldest (%u) ", mali_tl->point_oldest);
+ _mali_osk_snprintf(str, size, "next (%u)", mali_tl->point_next);
+ _mali_osk_snprintf(str, size, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system = mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_direct_print_timeline(mali_tl);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#else
+static void timeline_print_sync_pt(struct mali_internal_sync_point *sync_pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ mpt = to_mali_sync_pt(sync_pt);
+
+ if (mpt->flag) {
+ MALI_DEBUG_PRINT(2, ("mali_internal_sync_pt: %u\n", mpt->flag->point));
+ } else {
+ MALI_DEBUG_PRINT(2, ("uninitialized\n", mpt->flag->point));
+ }
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .free_pt = timeline_free_pt,
+ .release_obj = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ .print_pt = timeline_print_pt,
+ .print_obj = timeline_print_obj,
+#else
+ .pt_value_str = timeline_pt_value_str,
+ .timeline_value_str = timeline_value_str,
+#endif
+};
+
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+ struct sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+ if (NULL == sync_tl) return NULL;
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
+ /* Grab a reference on the module to ensure the callbacks are present
+ * as long as some timeline exists. The reference is released when the
+ * timeline is freed.
+ * Since this function is called from an ioctl on an open file we know
+ * we already have a reference, so using __module_get is safe. */
+ __module_get(THIS_MODULE);
+
+ return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+ s32 fd = -1;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+ fd = get_unused_fd();
+#else
+ fd = get_unused_fd_flags(0);
+#endif
+
+ if (fd < 0) {
+ sync_fence_put(sync_fence);
+ return -1;
+ }
+ sync_fence_install(sync_fence, fd);
+
+ return fd;
+}
+
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+ sync_fence_put(sync_fence1);
+ sync_fence_put(sync_fence2);
+
+ return sync_fence;
+}
+
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+ struct mali_sync_flag *flag;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ flag = mali_sync_flag_create(sync_tl, 0);
+ if (NULL == flag) return NULL;
+
+ sync_fence = mali_sync_flag_create_fence(flag);
+
+ mali_sync_flag_signal(flag, 0);
+ mali_sync_flag_put(flag);
+
+ return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+ struct mali_sync_flag *flag;
+
+ if (NULL == sync_tl) return NULL;
+
+ flag = _mali_osk_calloc(1, sizeof(*flag));
+ if (NULL == flag) return NULL;
+
+ flag->sync_tl = sync_tl;
+ flag->point = point;
+
+ flag->status = 0;
+ kref_init(&flag->refcount);
+
+ return flag;
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+ struct sync_pt *pt;
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == pt) return NULL;
+
+ mali_sync_flag_get(flag);
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->flag = flag;
+ mpt->sync_tl = flag->sync_tl;
+
+ return pt;
+}
+
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+ struct sync_pt *sync_pt;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ sync_pt = mali_sync_flag_create_pt(flag);
+ if (NULL == sync_pt) return NULL;
+
+ sync_fence = sync_fence_create("mali_flag_fence", sync_pt);
+ if (NULL == sync_fence) {
+ sync_pt_free(sync_pt);
+ return NULL;
+ }
+
+ return sync_fence;
+}
+#else
+static struct mali_internal_sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .has_signaled = timeline_has_signaled,
+ .free_pt = timeline_free_pt,
+ .release_obj = timeline_release,
+ .print_sync_pt = timeline_print_sync_pt,
+};
+
+struct mali_internal_sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+ struct mali_internal_sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ sync_tl = mali_internal_sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+ if (NULL == sync_tl) return NULL;
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
+ /* Grab a reference on the module to ensure the callbacks are present
+ * as long as some timeline exists. The reference is released when the
+ * timeline is freed.
+ * Since this function is called from an ioctl on an open file we know
+ * we already have a reference, so using __module_get is safe. */
+ __module_get(THIS_MODULE);
+
+ return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct mali_internal_sync_fence *sync_fence)
+{
+ s32 fd = -1;
+
+ fd = get_unused_fd_flags(0);
+
+ if (fd < 0) {
+ fput(sync_fence->file);
+ return -1;
+ }
+ fd_install(fd, sync_fence->file);
+ return fd;
+}
+
+struct mali_internal_sync_fence *mali_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
+{
+ struct mali_internal_sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ sync_fence = mali_internal_sync_fence_merge(sync_fence1, sync_fence2);
+ fput(sync_fence1->file);
+ fput(sync_fence2->file);
+
+ return sync_fence;
+}
+
+struct mali_internal_sync_fence *mali_sync_timeline_create_signaled_fence(struct mali_internal_sync_timeline *sync_tl)
+{
+ struct mali_sync_flag *flag;
+ struct mali_internal_sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ flag = mali_sync_flag_create(sync_tl, 0);
+ if (NULL == flag) return NULL;
+
+ sync_fence = mali_sync_flag_create_fence(flag);
+
+ mali_sync_flag_signal(flag, 0);
+ mali_sync_flag_put(flag);
+
+ return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct mali_internal_sync_timeline *sync_tl, mali_timeline_point point)
+{
+ struct mali_sync_flag *flag;
+
+ if (NULL == sync_tl) return NULL;
+
+ flag = _mali_osk_calloc(1, sizeof(*flag));
+ if (NULL == flag) return NULL;
+
+ flag->sync_tl = sync_tl;
+ flag->point = point;
+
+ flag->status = 0;
+ kref_init(&flag->refcount);
+
+ return flag;
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct mali_internal_sync_point *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+ struct mali_internal_sync_point *pt;
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ pt = mali_internal_sync_point_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+
+ if (pt == NULL) {
+ MALI_PRINT_ERROR(("Mali sync: sync_pt creation failed\n"));
+ return NULL;
+ }
+ mali_sync_flag_get(flag);
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->flag = flag;
+ mpt->sync_tl = flag->sync_tl;
+
+ return pt;
+}
+
+struct mali_internal_sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+ struct mali_internal_sync_point *sync_pt;
+ struct mali_internal_sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ sync_pt = mali_sync_flag_create_pt(flag);
+ if (NULL == sync_pt) {
+ MALI_PRINT_ERROR(("Mali sync: sync_pt creation failed\n"));
+ return NULL;
+ }
+ sync_fence = mali_internal_sync_fence_create(sync_pt);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali sync: sync_fence creation failed\n"));
+ fence_put(&sync_pt->base);
+ return NULL;
+ }
+
+ return sync_fence;
+}
+#endif
+
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+static void mali_sync_flag_free(struct kref *ref)
+{
+ struct mali_sync_flag *flag;
+
+ MALI_DEBUG_ASSERT_POINTER(ref);
+ flag = container_of(ref, struct mali_sync_flag, refcount);
+
+ _mali_osk_free(flag);
+}
+
+void mali_sync_flag_put(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_put(&flag->refcount, mali_sync_flag_free);
+}
+
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+
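+ /* Status encoding (as consumed by timeline_has_signaled() above):
+ * 0 while the flag is pending, 1 once signaled successfully, or the
+ * negative error code when signaled with an error. */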
+ MALI_DEBUG_ASSERT(0 == flag->status);
+ flag->status = (0 > error) ? error : 1;
+
+ _mali_osk_write_mem_barrier();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_timeline_signal(flag->sync_tl);
+#else
+ mali_internal_sync_timeline_signal(flag->sync_tl);
+#endif
+}
+
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_sync.h b/drivers/gpu/arm/utgard/linux/mali_sync.h
new file mode 100644
index 000000000000..c3d7d32029ff
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_sync.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ * Mali interface for Linux sync objects.
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/seq_file.h>
+#include <linux/version.h>
+
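+/* Select the sync headers that match the kernel version being built against;
+ * kernels from 4.6 onwards use the driver's own reimplementation in
+ * mali_internal_sync.h rather than the legacy sync framework headers. */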
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#include <linux/sync.h>
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+#include <sync.h>
+#else
+#include "mali_internal_sync.h"
+#endif
+
+
+#include "mali_osk.h"
+
+struct mali_sync_flag;
+struct mali_timeline;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+/**
+ * Create a sync timeline.
+ *
+ * @param timeline Mali timeline to connect to the sync timeline.
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
+
+/**
+ * Creates a file descriptor representing the sync fence. The sync fence is
+ * released if allocation of the file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences. Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl);
+
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+#else
+/**
+ * Create a sync timeline.
+ *
+ * @param timeline Mali timeline to connect to the sync timeline.
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct mali_internal_sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
+
+/**
+ * Creates a file descriptor representing the sync fence. The sync fence is
+ * released if allocation of the file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct mali_internal_sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences. Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_timeline_create_signaled_fence(struct mali_internal_sync_timeline *sync_tl);
+
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct mali_internal_sync_timeline *sync_tl, u32 point);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+
+#endif
+/**
+ * Grab sync flag reference.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_get(struct mali_sync_flag *flag);
+
+/**
+ * Release sync flag reference. If this was the last reference, the sync flag will be freed.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_put(struct mali_sync_flag *flag);
+
+/**
+ * Signal sync flag. All sync fences created from this flag will be signaled.
+ *
+ * @param flag Sync flag to signal.
+ * @param error Negative error code, or 0 if no error.
+ */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error);
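+
+/*
+ * Illustrative sketch only (not part of the driver sources): a typical sync
+ * flag lifecycle using the declarations above. "fence" is a struct sync_fence
+ * pointer on kernels before 4.6 and a struct mali_internal_sync_fence pointer
+ * afterwards.
+ *
+ *   flag = mali_sync_flag_create(sync_tl, point);
+ *   fence = mali_sync_flag_create_fence(flag); // takes its own flag reference
+ *   fd = mali_sync_fence_fd_alloc(fence);      // fd handed back to user space
+ *   ...
+ *   mali_sync_flag_signal(flag, 0);            // signals every fence created from flag
+ *   mali_sync_flag_put(flag);                  // drop the creator's reference
+ */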
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_uk_types.h b/drivers/gpu/arm/utgard/linux/mali_uk_types.h
new file mode 100644
index 000000000000..68b27b8be067
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_uk_types.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_core.c b/drivers/gpu/arm/utgard/linux/mali_ukk_core.c
new file mode 100644
index 000000000000..ae4db766e0ee
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_core.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memort allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+ _mali_uk_get_api_version_v2_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version_v2(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ } else {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type)) {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = 0; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
+
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+ _mali_uk_request_high_priority_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_request_high_priority(&kargs);
+
+ kargs.ctx = 0;
+
+ return map_errcode(err);
+}
+
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs)
+{
+ _mali_uk_pending_submit_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_pending_submit(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c b/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c
new file mode 100644
index 000000000000..d94498ca9fbc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c b/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c
new file mode 100644
index 000000000000..508b7d1a1897
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs)
+{
+ _mali_uk_alloc_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_alloc_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_allocate(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs)
+{
+ _mali_uk_free_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_free_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_free(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs)
+{
+ _mali_uk_bind_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_bind_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_bind(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs)
+{
+ _mali_uk_unbind_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_unbind_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_unbind(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs)
+{
+ _mali_uk_cow_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_cow(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs)
+{
+ _mali_uk_cow_modify_range_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_cow_modify_range(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
+
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs)
+{
+ _mali_uk_mem_resize_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_resize_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_resize(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs)
+{
+ _mali_uk_mem_write_safe_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ /* Check if we can access the buffers */
+ if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+ || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+ return -EINVAL;
+ }
+
+ /* Check if size wraps */
+ if ((kargs.size + kargs.dest) <= kargs.dest
+ || (kargs.size + kargs.src) <= kargs.src) {
+ return -EINVAL;
+ }
+
+ err = _mali_ukk_mem_write_safe(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.size, &uargs->size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void __user *user_buffer;
+ void *buffer = NULL;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+ goto err_exit;
+
+ user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+ if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+ goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ if (kargs.size <= 0)
+ return -EINVAL;
+ /* Allow at most 8MiB buffers; this is more than enough to dump a fully
+ * populated page table. */
+ if (kargs.size > SZ_8M)
+ return -EINVAL;
+
+ buffer = (void *)(uintptr_t)_mali_osk_valloc(kargs.size);
+ if (NULL == buffer) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.buffer = (uintptr_t)buffer;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+ goto err_exit;
+
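+ /* register_writes and page_table_dump now hold addresses inside the
+ * kernel-side buffer; rebase them to the matching offsets inside the
+ * user-supplied buffer before handing kargs back. */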
+ kargs.register_writes = kargs.register_writes -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+ kargs.page_table_dump = kargs.page_table_dump -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+ goto err_exit;
+
+ rc = 0;
+
+err_exit:
+ if (buffer) _mali_osk_vfree(buffer);
+ return rc;
+}
+
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+ _mali_uk_profiling_memory_usage_get_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_mem_usage_get(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c b/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c
new file mode 100644
index 000000000000..b706028543dc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the jobs were started successfully, 0 is returned. If there was an error, but the
+ * jobs were started, we return -ENOENT. For anything else returned, the jobs were not
+ * started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_and_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+ _mali_uk_pp_disable_wb_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ _mali_ukk_pp_job_disable_wb(&kargs);
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c b/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c
new file mode 100644
index 000000000000..8b39e69d9175
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+ _mali_uk_sw_counters_report_s kargs;
+ _mali_osk_errcode_t err;
+ u32 *counter_buffer;
+ u32 __user *counters;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
+ return -EFAULT;
+ }
+
+ /* make sure that kargs.num_counters is [at least somewhat] sane */
+ if (kargs.num_counters > 10000) {
+ MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+ return -EINVAL;
+ }
+
+ counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+ if (NULL == counter_buffer) {
+ return -ENOMEM;
+ }
+
+ counters = (u32 __user *)(uintptr_t)kargs.counters;
+
+ if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
+ kfree(counter_buffer);
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.counters = (uintptr_t)counter_buffer;
+
+ err = _mali_ukk_sw_counters_report(&kargs);
+
+ kfree(counter_buffer);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs)
+{
+ _mali_uk_profiling_stream_fd_get_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_profiling_stream_fd_get(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs)
+{
+ _mali_uk_profiling_control_set_s kargs;
+ _mali_osk_errcode_t err;
+ u8 *kernel_control_data = NULL;
+ u8 *kernel_response_data = NULL;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT;
+ if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+
+
+ /* Sanity check about the size */
+ if (kargs.control_packet_size > PAGE_SIZE || kargs.response_packet_size > PAGE_SIZE)
+ return -EINVAL;
+
+ if (0 != kargs.control_packet_size) {
+
+ if (0 == kargs.response_packet_size)
+ return -EINVAL;
+
+ kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
+ if (NULL == kernel_control_data) {
+ return -ENOMEM;
+ }
+
+ kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
+ if (NULL == kernel_response_data) {
+ _mali_osk_free(kernel_control_data);
+ return -ENOMEM;
+ }
+
+ kargs.control_packet_data = (uintptr_t)kernel_control_data;
+ kargs.response_packet_data = (uintptr_t)kernel_response_data;
+
+ if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_profiling_control_set(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return map_errcode(err);
+ }
+
+ if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ } else {
+
+ err = _mali_ukk_profiling_control_set(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ }
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c b/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c
new file mode 100644
index 000000000000..91e4e7bf59ac
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_soft_job.h"
+#include "mali_timeline.h"
+
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
+{
+ _mali_uk_soft_job_start_s kargs;
+ u32 type, point;
+ u64 user_job;
+ struct mali_timeline_fence fence;
+ struct mali_soft_job *job = NULL;
+ u32 __user *job_id_ptr = NULL;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session, -EINVAL);
+
+ MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+ return -EFAULT;
+ }
+
+ type = kargs.type;
+ user_job = kargs.user_job;
+ job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
+
+ mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
+
+ if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
+ MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
+ return -EINVAL;
+ }
+
+ /* Create soft job. */
+ job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job);
+ if (unlikely(NULL == job)) {
+ return map_errcode(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Write job id back to user space. */
+ if (0 != put_user(job->id, job_id_ptr)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id"));
+ mali_soft_job_destroy(job);
+ return map_errcode(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Start soft job. */
+ point = mali_soft_job_start(job, &fence);
+
+ if (0 != put_user(point, &uargs->point)) {
+ /* Let user space know that something failed after the job was started. */
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs)
+{
+ u32 job_id;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT;
+
+ err = mali_soft_job_system_signal_job(session->soft_job_system, job_id);
+
+ return map_errcode(err);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c b/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c
new file mode 100644
index 000000000000..cdad3de9ba2a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_timeline.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs)
+{
+ u32 val;
+ mali_timeline_id timeline;
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != get_user(val, &uargs->timeline)) return -EFAULT;
+
+ if (MALI_UK_TIMELINE_MAX <= val) {
+ return -EINVAL;
+ }
+
+ timeline = (mali_timeline_id)val;
+
+ point = mali_timeline_system_get_latest_point(session->timeline_system, timeline);
+
+ if (0 != put_user(point, &uargs->point)) return -EFAULT;
+
+ return 0;
+}
+
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs)
+{
+ u32 timeout, status;
+ mali_bool ret;
+ _mali_uk_fence_t uk_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+ if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT;
+
+ mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+ ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
+ status = (MALI_TRUE == ret ? 1 : 0);
+
+ if (0 != put_user(status, &uargs->status)) return -EFAULT;
+
+ return 0;
+}
+
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs)
+{
+ s32 sync_fd = -1;
+ _mali_uk_fence_t uk_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+ mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
+#else
+ sync_fd = -1;
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+ if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c b/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c
new file mode 100644
index 000000000000..f8b2546bfd6a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h
new file mode 100644
index 000000000000..8c343e0fa9be
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs);
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs);
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs);
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs);
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs);
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs);
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs);
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs);
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs);
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+int map_errcode(_mali_osk_errcode_t err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm.c b/drivers/gpu/arm/utgard/platform/arm/arm.c
new file mode 100644
index 000000000000..4e09aca4c6f0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright (C) 2010, 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm.c
+ * Platform-specific Mali driver functions for:
+ * - RealView Versatile platforms with ARM11 MPCore and Virtex-5.
+ * - Versatile Express platforms with ARM Cortex-A9 and Virtex-6.
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#include "mali_kernel_linux.h"
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <asm/io.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+
+#include "arm_core_scaling.h"
+#include "mali_executor.h"
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#include <linux/thermal.h>
+#endif
+
+static int mali_core_scaling_enable = 0;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+static u32 mali_read_phys(u32 phys_addr);
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value);
+#endif
+
+#if defined(CONFIG_ARCH_VEXPRESS) && defined(CONFIG_ARM64)
+
+#define SECURE_MODE_CONTROL_HANDLER 0x6F02006C
+void *secure_mode_mapped_addr = NULL;
+/**
+ * Reset the GPU and enable/disable Mali secure mode.
+ * @return 0 on success, non-zero on failure.
+ */
+
+static int mali_gpu_reset_and_secure_mode_enable_juno(void)
+{
+ u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF;
+ MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr);
+
+ iowrite32(1, ((u8 *)secure_mode_mapped_addr) + phys_offset);
+
+ if (1 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) {
+ MALI_DEBUG_PRINT(3, ("Mali GPU reset and secure mode enabled successfully\n"));
+ return 0;
+ }
+
+ MALI_PRINT_ERROR(("Failed to reset GPU and enable Mali secure mode\n"));
+
+ return -1;
+
+}
+
+static int mali_gpu_reset_and_secure_mode_disable_juno(void)
+{
+ u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF;
+ MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr);
+
+ iowrite32(0, ((u8 *)secure_mode_mapped_addr) + phys_offset);
+
+ if (0 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) {
+ MALI_DEBUG_PRINT(3, ("Mali GPU reset and secure mode disabled successfully\n"));
+ return 0;
+ }
+
+ MALI_PRINT_ERROR(("Failed to reset GPU and disable Mali secure mode\n"));
+ return -1;
+}
+
+static int mali_secure_mode_init_juno(void)
+{
+ u32 phys_addr_page = SECURE_MODE_CONTROL_HANDLER & 0xFFFFE000;
+ u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+
+ MALI_DEBUG_ASSERT(NULL == secure_mode_mapped_addr);
+
+ secure_mode_mapped_addr = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != secure_mode_mapped_addr) {
+ return mali_gpu_reset_and_secure_mode_disable_juno();
+ }
+ MALI_DEBUG_PRINT(2, ("Failed to ioremap for Mali secure mode\n"));
+ return -1;
+}
+
+static void mali_secure_mode_deinit_juno(void)
+{
+ if (NULL != secure_mode_mapped_addr) {
+ mali_gpu_reset_and_secure_mode_disable_juno();
+ iounmap(secure_mode_mapped_addr);
+ secure_mode_mapped_addr = NULL;
+ }
+}
+#endif
+
+#ifndef CONFIG_MALI_DT
+static void mali_platform_device_release(struct device *device);
+
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+/* Juno + Mali-450 MP6 in V7 FPGA */
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+ MALI_GPU_RESOURCES_MALI470_MP4_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp3[] = {
+ MALI_GPU_RESOURCES_MALI470_MP3_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp2[] = {
+ MALI_GPU_RESOURCES_MALI470_MP2_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp1[] = {
+ MALI_GPU_RESOURCES_MALI470_MP1_PMU(0x6F040000, 200, 200, 200, 200, 200)
+};
+
+#else
+static struct resource mali_gpu_resources_m450_mp8[] = {
+ MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m450_mp4[] = {
+ MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+ MALI_GPU_RESOURCES_MALI470_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+#endif /* CONFIG_ARM64 */
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+static struct resource mali_gpu_resources_m300[] = {
+ MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp1[] = {
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp2[] = {
+ MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
+};
+
+#endif
+#endif
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+static struct thermal_zone_device *gpu_tz;
+
+/* Calculate gpu static power example for reference */
+static unsigned long arm_model_static_power(unsigned long voltage)
+{
+ int temperature, temp;
+ int temp_squared, temp_cubed, temp_scaling_factor;
+ const unsigned long coefficient = (410UL << 20) / (729000000UL >> 10);
+ const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
+ unsigned long static_power;
+
+ if (gpu_tz) {
+ int ret;
+
+ ret = gpu_tz->ops->get_temp(gpu_tz, &temperature);
+ if (ret) {
+ MALI_DEBUG_PRINT(2, ("Error reading temperature for gpu thermal zone: %d\n", ret));
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+ } else {
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+
+ /* Calculate the temperature scaling factor. To be applied to the
+ * voltage scaled power.
+ */
+ temp = temperature / 1000;
+ temp_squared = temp * temp;
+ temp_cubed = temp_squared * temp;
+ temp_scaling_factor =
+ (2 * temp_cubed)
+ - (80 * temp_squared)
+ + (4700 * temp)
+ + 32000;
+
+ static_power = (((coefficient * voltage_cubed) >> 20)
+ * temp_scaling_factor)
+ / 1000000;
+
+ return static_power;
+}
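+
+/* Worked example of the integer arithmetic above (illustrative numbers only):
+ * with voltage = 900 mV and a reported temperature of 60000 (60 C),
+ * coefficient = (410 << 20) / (729000000 >> 10) = 603,
+ * voltage_cubed = (900 * 900 * 900) >> 10 = 711914,
+ * temp_scaling_factor = 2*60^3 - 80*60^2 + 4700*60 + 32000 = 458000,
+ * so static_power = ((603 * 711914) >> 20) * 458000 / 1000000, roughly 187 mW.
+ */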
+
+/* Calculate gpu dynamic power example for reference */
+static unsigned long arm_model_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+{
+ /* The inputs: freq (f) is in Hz, and voltage (v) in mV.
+ * The coefficient (c) is in mW/(MHz mV mV).
+ *
+ * This function calculates the dynamic power using the formula:
+ * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz)
+ */
+ const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */
+ const unsigned long f_mhz = freq / 1000000; /* MHz */
+ const unsigned long coefficient = 3600; /* mW/(MHz*mV*mV) */
+ unsigned long dynamic_power;
+
+ dynamic_power = (coefficient * v2 * f_mhz) / 1000000; /* mW */
+
+ return dynamic_power;
+}
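+
+/* Worked example (illustrative numbers only): at 900 mV and 500 MHz,
+ * v2 = (900 * 900) / 1000 = 810 and f_mhz = 500, so
+ * dynamic_power = (3600 * 810 * 500) / 1000000 = 1458 mW.
+ */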
+
+struct devfreq_cooling_power arm_cooling_ops = {
+ .get_static_power = arm_model_static_power,
+ .get_dynamic_power = arm_model_dynamic_power,
+};
+#endif
+
+static struct mali_gpu_device_data mali_gpu_data = {
+#ifndef CONFIG_MALI_DT
+ .pmu_switch_delay = 0xFF, /* does not have to be this high on FPGA, but a delay is useful for testing */
+#if defined(CONFIG_ARCH_VEXPRESS)
+ .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+#endif
+#endif
+ .max_job_runtime = 60000, /* 60 seconds */
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
+ .dedicated_mem_size = 0x10000000, /* 256MB */
+#endif
+#if defined(CONFIG_ARM64)
+ /* Some framebuffer drivers get the framebuffer dynamically, such as through GEM,
+ * in which case the memory range cannot be predicted in advance.
+ */
+ .fb_start = 0x0,
+ .fb_size = 0xFFFFF000,
+#else
+ .fb_start = 0xe0000000,
+ .fb_size = 0x01000000,
+#endif
+ .control_interval = 1000, /* 1000ms */
+ .utilization_callback = mali_gpu_utilization_callback,
+ .get_clock_info = NULL,
+ .get_freq = NULL,
+ .set_freq = NULL,
+#if defined(CONFIG_ARCH_VEXPRESS) && defined(CONFIG_ARM64)
+ .secure_mode_init = mali_secure_mode_init_juno,
+ .secure_mode_deinit = mali_secure_mode_deinit_juno,
+ .gpu_reset_and_secure_mode_enable = mali_gpu_reset_and_secure_mode_enable_juno,
+ .gpu_reset_and_secure_mode_disable = mali_gpu_reset_and_secure_mode_disable_juno,
+#else
+ .secure_mode_init = NULL,
+ .secure_mode_deinit = NULL,
+ .gpu_reset_and_secure_mode_enable = NULL,
+ .gpu_reset_and_secure_mode_disable = NULL,
+#endif
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+ .gpu_cooling_ops = &arm_cooling_ops,
+#endif
+};
+
+#ifndef CONFIG_MALI_DT
+static struct platform_device mali_gpu_device = {
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .dev.release = mali_platform_device_release,
+ .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask,
+ .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+
+ .dev.platform_data = &mali_gpu_data,
+};
+
+int mali_platform_device_register(void)
+{
+ int err = -1;
+ int num_pp_cores = 0;
+#if defined(CONFIG_ARCH_REALVIEW)
+ u32 m400_gp_version;
+#endif
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+ /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+ mali_gpu_device.dev.archdata.dma_ops = &dummy_dma_ops;
+#else
+ mali_gpu_device.dev.archdata.dma_ops = dma_ops;
+#endif
+ if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n"));
+ num_pp_cores = 3;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp3);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp3;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp2);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp2;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp1);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp1;
+ }
+#else
+ if (mali_read_phys(0xFC000000) == 0x00000450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+ num_pp_cores = 8;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+ } else if (mali_read_phys(0xFC000000) == 0x40600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+ } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp4;
+ } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
+ }
+#endif /* CONFIG_ARM64 */
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+ m400_gp_version = mali_read_phys(0xC000006C);
+ if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m300);
+ mali_gpu_device.resource = mali_gpu_resources_m300;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+ u32 fpga_fw_version = mali_read_phys(0xC0010000);
+ if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+ /* Mali-400 MP1 r1p0 or r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp1);
+ mali_gpu_device.resource = mali_gpu_resources_m400_mp1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if (fpga_fw_version == 0x130C000F) {
+ /* Mali-400 MP2 r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp2);
+ mali_gpu_device.resource = mali_gpu_resources_m400_mp2;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ }
+
+#endif
+ /* Register the platform device */
+ err = platform_device_register(&mali_gpu_device);
+ if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+ pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+ pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+
+ return 0;
+ }
+
+ return err;
+}
+
+void mali_platform_device_unregister(void)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+ mali_core_scaling_term();
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(&(mali_gpu_device.dev));
+#endif
+ platform_device_unregister(&mali_gpu_device);
+
+ platform_device_put(&mali_gpu_device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+}
+
+static void mali_platform_device_release(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+#else /* CONFIG_MALI_DT */
+int mali_platform_device_init(struct platform_device *device)
+{
+ int num_pp_cores = 0;
+ int err = -1;
+#if defined(CONFIG_ARCH_REALVIEW)
+ u32 m400_gp_version;
+#endif
+
+ /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+ if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n"));
+ num_pp_cores = 3;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n"));
+ num_pp_cores = 2;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n"));
+ num_pp_cores = 1;
+ }
+#else
+ if (mali_read_phys(0xFC000000) == 0x00000450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+ num_pp_cores = 8;
+ } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+ num_pp_cores = 4;
+ } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ }
+#endif
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+ m400_gp_version = mali_read_phys(0xC000006C);
+ if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+ num_pp_cores = 1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+ u32 fpga_fw_version = mali_read_phys(0xC0010000);
+ if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+ /* Mali-400 MP1 r1p0 or r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if (fpga_fw_version == 0x130C000F) {
+ /* Mali-400 MP2 r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ }
+#endif
+
+ /* Since kernel 3.15 the device tree core sets the default dev
+ * parameters in of_platform_device_create_pdata, but this varies
+ * between kernel versions. For example, 3.10 does not set
+ * device->dev.dma_mask, which causes dma_mapping errors if it is
+ * left unset here. To stay independent of the kernel version, the
+ * DDK sets the required parameters itself.
+ */
+ if (!device->dev.dma_mask)
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.archdata.dma_ops = dma_ops;
+
+ err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+
+ if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_set_autosuspend_delay(&(device->dev), 1000);
+ pm_runtime_use_autosuspend(&(device->dev));
+#endif
+ pm_runtime_enable(&(device->dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+ }
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+ /* Get thermal zone */
+ gpu_tz = thermal_zone_get_zone_by_name("soc_thermal");
+ if (IS_ERR(gpu_tz)) {
+ MALI_DEBUG_PRINT(2, ("Error getting gpu thermal zone (%ld), not yet ready?\n",
+ PTR_ERR(gpu_tz)));
+ gpu_tz = NULL;
+
+ err = -EPROBE_DEFER;
+ }
+#endif
+
+ return err;
+}
+
+int mali_platform_device_deinit(struct platform_device *device)
+{
+ MALI_IGNORE(device);
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+ mali_core_scaling_term();
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(&(device->dev));
+#endif
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+
+ return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
+static u32 mali_read_phys(u32 phys_addr)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ u32 ret = 0xDEADBEEF;
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped) {
+ ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped) {
+ iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+}
+#endif
+
+static int param_set_core_scaling(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+
+ if (1 == mali_core_scaling_enable) {
+ mali_core_scaling_sync(mali_executor_get_num_cores_enabled());
+ }
+ return ret;
+}
+
+static struct kernel_param_ops param_ops_core_scaling = {
+ .set = param_set_core_scaling,
+ .get = param_get_int,
+};
+
+module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
+MODULE_PARM_DESC(mali_core_scaling_enable, "1 enables the core scaling policy, 0 disables it");
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+ if (1 == mali_core_scaling_enable) {
+ mali_core_scaling_update(data);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c
new file mode 100644
index 000000000000..7a2fc8107b4f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+static void set_num_cores(struct work_struct *work)
+{
+ int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+ MALI_DEBUG_ASSERT(0 == err);
+ MALI_IGNORE(err);
+}
+
+static void enable_one_core(void)
+{
+ if (num_cores_enabled < num_cores_total) {
+ ++num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+ }
+
+ MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void disable_one_core(void)
+{
+ if (1 < num_cores_enabled) {
+ --num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+ }
+
+ MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void enable_max_num_cores(void)
+{
+ if (num_cores_enabled < num_cores_total) {
+ num_cores_enabled = num_cores_total;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+ }
+
+ MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+void mali_core_scaling_init(int num_pp_cores)
+{
+ INIT_WORK(&wq_work, set_num_cores);
+
+ num_cores_total = num_pp_cores;
+ num_cores_enabled = num_pp_cores;
+
+ /* NOTE: Mali is not fully initialized at this point. */
+}
+
+void mali_core_scaling_sync(int num_cores)
+{
+ num_cores_enabled = num_cores;
+}
+
+void mali_core_scaling_term(void)
+{
+ flush_scheduled_work();
+}
+
+#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
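+
+/* The utilization values compared against below appear to use a 0..256 scale
+ * (hence the 256 argument), so for example PERCENT_OF(90, 256) == 230 and
+ * PERCENT_OF(50, 256) == 128.
+ */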
+
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+ /*
+ * This function implements a very trivial PP core scaling algorithm.
+ *
+ * It is _NOT_ of production quality.
+ * The only intention behind this algorithm is to exercise and test the
+ * core scaling functionality of the driver.
+ * It is _NOT_ tuned for either power saving or performance!
+ *
+ * Metrics other than PP utilization also need to be considered
+ * in order to make a good core scaling algorithm.
+ */
+
+ MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+ /* NOTE: this function is normally called directly from the utilization callback which is in
+ * timer context. */
+
+ if (PERCENT_OF(90, 256) < data->utilization_pp) {
+ enable_max_num_cores();
+ } else if (PERCENT_OF(50, 256) < data->utilization_pp) {
+ enable_one_core();
+ } else if (PERCENT_OF(40, 256) < data->utilization_pp) {
+ /* do nothing */
+ } else if (PERCENT_OF(0, 256) < data->utilization_pp) {
+ disable_one_core();
+ } else {
+ /* do nothing */
+ }
+}
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h
new file mode 100644
index 000000000000..8e0101830749
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+void mali_core_scaling_sync(int num_cores);
+
+#endif /* __ARM_CORE_SCALING_H__ */
diff --git a/drivers/gpu/arm/utgard/platform/arm/juno_opp.c b/drivers/gpu/arm/utgard/platform/arm/juno_opp.c
new file mode 100644
index 000000000000..e4e7ab8b2c2e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/juno_opp.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010, 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file juno_opp.c
+ * Example: Set up opp table
+ * Using ARM64 juno specific SCPI_PROTOCOL get frequence inform
+ * Customer need implement your own platform releated logic
+ */
+#ifdef CONFIG_ARCH_VEXPRESS
+#ifdef CONFIG_MALI_DEVFREQ
+#ifdef CONFIG_ARM64
+#ifdef CONFIG_ARM_SCPI_PROTOCOL
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/scpi_protocol.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux < 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp_add opp_add
+#define dev_pm_opp_remove opp_remove
+#endif /* Linux >= 3.13 */
+
+#include "mali_kernel_common.h"
+
+static int init_juno_opps_from_scpi(struct device *dev)
+{
+ struct scpi_dvfs_info *sinfo;
+ struct scpi_ops *sops;
+
+ int i;
+
+ sops = get_scpi_ops();
+ if (NULL == sops) {
+ MALI_DEBUG_PRINT(2, ("Mali didn't get any scpi ops \n"));
+ return -1;
+ }
+
+ /* Hard coded for Juno. 2 is GPU domain */
+ sinfo = sops->dvfs_get_info(2);
+ if (IS_ERR_OR_NULL(sinfo))
+ return PTR_ERR(sinfo);
+
+ for (i = 0; i < sinfo->count; i++) {
+ struct scpi_opp *e = &sinfo->opps[i];
+
+ MALI_DEBUG_PRINT(2, ("Mali OPP from SCPI: %u Hz @ %u mV\n", e->freq, e->m_volt));
+
+ dev_pm_opp_add(dev, e->freq, e->m_volt * 1000);
+ }
+
+ return 0;
+}
+
+int setup_opps(void)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+ int err;
+
+ np = of_find_node_by_name(NULL, "gpu");
+ if (!np) {
+ pr_err("Failed to find DT entry for Mali\n");
+ return -EFAULT;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("Failed to find device for Mali\n");
+ of_node_put(np);
+ return -EFAULT;
+ }
+
+ err = init_juno_opps_from_scpi(&pdev->dev);
+
+ of_node_put(np);
+
+ return err;
+}
+
+int term_opps(struct device *dev)
+{
+ struct scpi_dvfs_info *sinfo;
+ struct scpi_ops *sops;
+
+ int i;
+
+ sops = get_scpi_ops();
+ if (NULL == sops) {
+ MALI_DEBUG_PRINT(2, ("Mali didn't get any scpi ops \n"));
+ return -1;
+ }
+
+ /* Hard coded for Juno. 2 is GPU domain */
+ sinfo = sops->dvfs_get_info(2);
+ if (IS_ERR_OR_NULL(sinfo))
+ return PTR_ERR(sinfo);
+
+ for (i = 0; i < sinfo->count; i++) {
+ struct scpi_opp *e = &sinfo->opps[i];
+
+ MALI_DEBUG_PRINT(2, ("Mali Remove OPP: %u Hz \n", e->freq));
+
+ dev_pm_opp_remove(dev, e->freq);
+ }
+
+ return 0;
+
+}
+#endif
+#endif
+#endif
+#endif
diff --git a/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c
new file mode 100644
index 000000000000..bd2e8f2cdf5c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (C) 2014 Hisilicon Co. Ltd.
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+/**
+ * @file mali_hikey.c
+ * HiKey platform specific Mali driver functions.
+ */
+
+/* Set to 1 to enable ION (not tested yet). */
+#define HISI6220_USE_ION 0
+
+#define pr_fmt(fmt) "Mali: HiKey: " fmt
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if HISI6220_USE_ION
+#include <linux/hisi/hisi_ion.h>
+#endif
+#include <linux/byteorder/generic.h>
+
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_hikey_hi6220_registers_gpu.h"
+
+#define MALI_GPU_MHZ 1000000
+#define MALI_IRQ_ID 142
+#define MALI_FRAME_BUFFER_ADDR 0x3F100000
+#define MALI_FRAME_BUFFER_SIZE 0x00708000
+
+#define MALI_CALC_REG_MASK(bit_start, bit_end) \
+ (((0x1 << (bit_end - bit_start + 1)) - 1) << bit_start)
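+
+/* For example, MALI_CALC_REG_MASK(1, 1) evaluates to 0x2 and
+ * MALI_CALC_REG_MASK(0, 3) to 0xF, i.e. a mask covering bits
+ * bit_start..bit_end inclusive.
+ */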
+
+enum mali_core_type {
+ MALI_CORE_400_MP1 = 0,
+ MALI_CORE_400_MP2 = 1,
+ MALI_CORE_450_MP4 = 2,
+ MALI_CORE_TYPE_MAX
+};
+
+enum mali_power_mode {
+ MALI_POWER_MODE_ON, /**< Power on */
+ MALI_POWER_MODE_LIGHT_SLEEP, /**< Idle for a short time, or runtime PM suspend */
+ MALI_POWER_MODE_DEEP_SLEEP, /**< Idle for a long time, or OS suspend */
+};
+
+struct mali_soc_remap_addr_table {
+ u8 *soc_media_sctrl_base_addr;
+ u8 *soc_ao_sctrl_base_addr;
+ u8 *soc_peri_sctrl_base_addr;
+ u8 *soc_pmctl_base_addr;
+};
+
+static struct clk *mali_clk_g3d;
+static struct clk *mali_pclk_g3d;
+static struct device_node *mali_np;
+static bool mali_gpu_power_status;
+
+static struct resource mali_gpu_resources_m450_mp4[] = {
+ MALI_GPU_RESOURCES_MALI450_MP4(
+ SOC_G3D_S_BASE_ADDR, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID,
+ MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID,
+ MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID)
+};
+
+static struct mali_soc_remap_addr_table *mali_soc_addr_table;
+
+static void mali_reg_writel(u8 *base_addr, unsigned int reg_offset,
+ unsigned char start_bit, unsigned char end_bit,
+ unsigned int val)
+{
+ int read_val;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(reg_lock);
+ void __iomem *addr;
+
+ WARN_ON(!base_addr);
+
+ addr = base_addr + reg_offset;
+ spin_lock_irqsave(&reg_lock, flags);
+ read_val = readl(addr) & ~(MALI_CALC_REG_MASK(start_bit, end_bit));
+ read_val |= (MALI_CALC_REG_MASK(start_bit, end_bit)
+ & (val << start_bit));
+ writel(read_val, addr);
+ spin_unlock_irqrestore(&reg_lock, flags);
+}
+
+static unsigned int mali_reg_readl(u8 *base_addr, unsigned int reg_offset,
+ unsigned char start_bit,
+ unsigned char end_bit)
+{
+ unsigned int val;
+
+ WARN_ON(!base_addr);
+
+ val = readl((void __iomem *)(base_addr + reg_offset));
+ val &= MALI_CALC_REG_MASK(start_bit, end_bit);
+
+ return val >> start_bit;
+}
+
+static int mali_clock_on(void)
+{
+ u32 core_freq = 0;
+ u32 pclk_freq = 0;
+ int stat;
+
+ stat = clk_prepare_enable(mali_pclk_g3d);
+ if (stat)
+ return stat;
+
+ stat = of_property_read_u32(mali_np, "pclk_freq", &pclk_freq);
+ if (stat)
+ return stat;
+
+ stat = clk_set_rate(mali_pclk_g3d, pclk_freq * MALI_GPU_MHZ);
+ if (stat)
+ return stat;
+
+ stat = of_property_read_u32(mali_np, "mali_def_freq", &core_freq);
+ if (stat)
+ return stat;
+
+ stat = clk_set_rate(mali_clk_g3d, core_freq * MALI_GPU_MHZ);
+ if (stat)
+ return stat;
+
+ stat = clk_prepare_enable(mali_clk_g3d);
+ if (stat)
+ return stat;
+
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_CLKDIS_ADDR(0), 17, 17, 1);
+
+ return 0;
+}
+
+static void mali_clock_off(void)
+{
+ clk_disable_unprepare(mali_clk_g3d);
+ clk_disable_unprepare(mali_pclk_g3d);
+}
+
+static int mali_domain_powerup_finish(void)
+{
+ unsigned int ret;
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RSTDIS0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RST_STAT0_ADDR(0), 1, 1);
+ if (ret != 0) {
+ pr_err("SET SC_PW_RSTDIS0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISODIS0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISO_STAT0_ADDR(0), 1, 1);
+ if (ret != 0) {
+ pr_err("SET SC_PW_ISODIS0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLKEN0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLK_STAT0_ADDR(0), 1, 1);
+ if (ret != 1) {
+ pr_err("SET SC_PW_CLKEN0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RSTDIS_ADDR(0), 0, 0, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RST_STAT_ADDR(0), 0, 0);
+ if (ret != 0) {
+ pr_err("SET SC_MEDIA_RSTDIS failed!\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int mali_platform_powerup(void)
+{
+ int stat;
+
+ if (mali_gpu_power_status)
+ return 0;
+
+ stat = mali_clock_on();
+ if (stat)
+ return stat;
+
+ stat = mali_domain_powerup_finish();
+ if (stat)
+ return stat;
+
+ mali_gpu_power_status = true;
+
+ return 0;
+}
+
+static int mali_regulator_disable(void)
+{
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RSTEN_ADDR(0), 0, 0, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLKDIS0_ADDR(0), 1, 1, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISOEN0_ADDR(0), 1, 1, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RSTEN0_ADDR(0), 1, 1, 1);
+
+ return 0;
+}
+
+static int mali_platform_powerdown(void)
+{
+ int stat;
+
+ if (!mali_gpu_power_status)
+ return 0;
+
+ stat = mali_regulator_disable();
+ if (stat)
+ return stat;
+
+ mali_clock_off();
+ mali_gpu_power_status = false;
+
+ return 0;
+}
+
+static int mali_platform_power_mode_change(enum mali_power_mode power_mode)
+{
+ int stat;
+
+ switch (power_mode) {
+ case MALI_POWER_MODE_ON:
+ stat = mali_platform_powerup();
+ break;
+ case MALI_POWER_MODE_LIGHT_SLEEP:
+ case MALI_POWER_MODE_DEEP_SLEEP:
+ stat = mali_platform_powerdown();
+ break;
+ default:
+ pr_err("Invalid power mode\n");
+ stat = -EINVAL;
+ break;
+ }
+
+ return stat;
+}
+
+static int mali_os_suspend(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->suspend) {
+ stat = device->driver->pm->suspend(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+}
+
+static int mali_os_resume(struct device *device)
+{
+ int stat;
+
+ stat = mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+ if (stat)
+ return stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->resume) {
+ stat = device->driver->pm->resume(device);
+ }
+
+ return stat;
+}
+
+static int mali_os_freeze(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->freeze) {
+ stat = device->driver->pm->freeze(device);
+ } else {
+ stat = 0;
+ }
+
+ return stat;
+}
+
+static int mali_os_thaw(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->thaw) {
+ stat = device->driver->pm->thaw(device);
+ } else {
+ stat = 0;
+ }
+
+ return stat;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_runtime_suspend(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_suspend) {
+ stat = device->driver->pm->runtime_suspend(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+}
+
+static int mali_runtime_resume(struct device *device)
+{
+ int stat;
+
+ stat = mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+ if (stat)
+ return stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_resume) {
+ stat = device->driver->pm->runtime_resume(device);
+ }
+
+ return stat;
+}
+
+static int mali_runtime_idle(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_idle) {
+ stat = device->driver->pm->runtime_idle(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return pm_runtime_suspend(device);
+}
+#endif
+
+static int init_mali_clock_regulator(struct platform_device *pdev)
+{
+ int stat, ret;
+
+ BUG_ON(mali_clk_g3d || mali_pclk_g3d);
+
+ mali_gpu_power_status = true;
+
+ /* clk init */
+ mali_clk_g3d = clk_get(&pdev->dev, "clk_g3d");
+ if (IS_ERR(mali_clk_g3d)) {
+ pr_err("failed to get source CLK_G3D\n");
+ return -ENODEV;
+ }
+
+ mali_pclk_g3d = clk_get(&pdev->dev, "pclk_g3d");
+ if (IS_ERR(mali_pclk_g3d)) {
+ pr_err("failed to get source PCLK_G3D\n");
+ return -ENODEV;
+ }
+
+ ret = mali_reg_readl(mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(0),
+ 10, 10);
+ if (ret != 1) {
+ mali_reg_writel(mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKEN12_ADDR(0),
+ 10, 10, 1);
+ ret = mali_reg_readl(
+ mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(0), 10, 10);
+ if (ret != 1) {
+ pr_err("SET SC_PERIPH_CLKEN12 failed!\n");
+ return -EFAULT;
+ }
+ }
+
+ stat = mali_clock_on();
+ if (stat)
+ return stat;
+
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(0), 15, 15, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(0), 15, 15);
+ if (ret != 1) {
+ pr_err("SET SC_MEDIA_CLKCFG2 failed!\n");
+ return -EFAULT;
+ }
+
+ return mali_domain_powerup_finish();
+}
+
+static int deinit_mali_clock_regulator(void)
+{
+ int stat;
+
+ BUG_ON(!mali_clk_g3d || !mali_pclk_g3d);
+
+ stat = mali_platform_powerdown();
+ if (stat)
+ return stat;
+
+ clk_put(mali_clk_g3d);
+ mali_clk_g3d = NULL;
+ clk_put(mali_pclk_g3d);
+ mali_pclk_g3d = NULL;
+
+ return 0;
+}
+
+static struct mali_gpu_device_data mali_gpu_data = {
+ .shared_mem_size = 1024 * 1024 * 1024, /* 1024MB */
+ .fb_start = MALI_FRAME_BUFFER_ADDR,
+ .fb_size = MALI_FRAME_BUFFER_SIZE,
+ .max_job_runtime = 2000, /* 2 second timeout */
+ .control_interval = 50, /* 50ms */
+#ifdef CONFIG_MALI_DVFS
+ .utilization_callback = mali_gpu_utilization_proc,
+#endif
+};
+
+static const struct dev_pm_ops mali_gpu_device_type_pm_ops = {
+ .suspend = mali_os_suspend,
+ .resume = mali_os_resume,
+ .freeze = mali_os_freeze,
+ .thaw = mali_os_thaw,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_runtime_suspend,
+ .runtime_resume = mali_runtime_resume,
+ .runtime_idle = mali_runtime_idle,
+#endif
+};
+
+static struct device_type mali_gpu_device_device_type = {
+ .pm = &mali_gpu_device_type_pm_ops,
+};
+
+static enum mali_core_type mali_get_gpu_type(void)
+{
+ u32 gpu_type = MALI_CORE_TYPE_MAX;
+ int err = of_property_read_u32(mali_np, "mali_type", &gpu_type);
+
+ if (err) {
+ pr_err("failed to read mali_type from device tree\n");
+ return -EFAULT;
+ }
+
+ return gpu_type;
+}
+
+#if HISI6220_USE_ION
+static int mali_ion_mem_init(void)
+{
+ struct ion_heap_info_data mem_data;
+
+ if (hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
+ pr_err("Failed to get ION_FB_HEAP_ID\n");
+ return -EFAULT;
+ }
+
+ if (mem_data.heap_size == 0) {
+ pr_err("fb size is 0\n");
+ return -EINVAL;
+ }
+
+ mali_gpu_data.fb_size = mem_data.heap_size;
+ mali_gpu_data.fb_start = (unsigned long)(mem_data.heap_phy);
+ pr_debug("fb_size=0x%x, fb_start=0x%x\n",
+ mali_gpu_data.fb_size, mali_gpu_data.fb_start);
+
+ return 0;
+}
+#endif
+
+static int mali_remap_soc_addr(void)
+{
+ BUG_ON(mali_soc_addr_table);
+
+ mali_soc_addr_table = kmalloc(sizeof(struct mali_soc_remap_addr_table),
+ GFP_KERNEL);
+ if (!mali_soc_addr_table)
+ return -ENOMEM;
+
+ mali_soc_addr_table->soc_media_sctrl_base_addr =
+ ioremap(SOC_MEDIA_SCTRL_BASE_ADDR, REG_MEDIA_SC_IOSIZE);
+ mali_soc_addr_table->soc_ao_sctrl_base_addr =
+ ioremap(SOC_AO_SCTRL_BASE_ADDR, REG_SC_ON_IOSIZE);
+ mali_soc_addr_table->soc_peri_sctrl_base_addr =
+ ioremap(SOC_PERI_SCTRL_BASE_ADDR, REG_SC_OFF_IOSIZE);
+ mali_soc_addr_table->soc_pmctl_base_addr =
+ ioremap(SOC_PMCTRL_BASE_ADDR, REG_PMCTRL_IOSIZE);
+
+ if (!mali_soc_addr_table->soc_media_sctrl_base_addr
+ || !mali_soc_addr_table->soc_ao_sctrl_base_addr
+ || !mali_soc_addr_table->soc_peri_sctrl_base_addr
+ || !mali_soc_addr_table->soc_pmctl_base_addr) {
+ pr_err("Failed to remap SoC addresses\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mali_unmap_soc_addr(void)
+{
+ iounmap((void __iomem *)mali_soc_addr_table->soc_media_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_ao_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_peri_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_pmctl_base_addr);
+ kfree(mali_soc_addr_table);
+ mali_soc_addr_table = NULL;
+}
+
+int mali_platform_device_init(struct platform_device *pdev)
+{
+ int stat;
+ int irq, i;
+
+#if HISI6220_USE_ION
+ stat = mali_ion_mem_init();
+ if (stat)
+ return stat;
+#endif
+
+ stat = mali_remap_soc_addr();
+ if (stat)
+ return stat;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.type = &mali_gpu_device_device_type;
+ pdev->dev.platform_data = &mali_gpu_data;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ mali_np = pdev->dev.of_node;
+
+ if (mali_get_gpu_type() != MALI_CORE_450_MP4) {
+ pr_err("Unexpected GPU type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We need to use DT to get the irq domain, so rewrite the static
+ * table with the irq returned by platform_get_irq().
+ */
+ irq = platform_get_irq(pdev, 0);
+ for (i = 0; i < ARRAY_SIZE(mali_gpu_resources_m450_mp4); i++) {
+ if (IORESOURCE_IRQ & mali_gpu_resources_m450_mp4[i].flags) {
+ mali_gpu_resources_m450_mp4[i].start = irq;
+ mali_gpu_resources_m450_mp4[i].end = irq;
+ }
+ }
+ pdev->num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+ pdev->resource = mali_gpu_resources_m450_mp4;
+
+ stat = init_mali_clock_regulator(pdev);
+ if (stat)
+ return stat;
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_set_autosuspend_delay(&(pdev->dev), 1);
+ pm_runtime_use_autosuspend(&(pdev->dev));
+ pm_runtime_enable(&pdev->dev);
+#endif
+
+ return 0;
+}
+
+int mali_platform_device_deinit(void)
+{
+ int stat;
+
+ stat = deinit_mali_clock_regulator();
+ if (stat)
+ return stat;
+
+ mali_unmap_soc_addr();
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h
new file mode 100644
index 000000000000..0bdf4a0482fd
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2014 Hisilicon Co. Ltd.
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * Author: Xuzixin <Xuzixin@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef MALI_HIKEY_HI6220_REGISTERS_GPU_H
+#define MALI_HIKEY_HI6220_REGISTERS_GPU_H 1
+
+#include <linux/mm.h>
+
+#define SOC_G3D_S_BASE_ADDR 0xF4080000 /* G3D ctrl base addr */
+#define SOC_MEDIA_SCTRL_BASE_ADDR 0xF4410000 /* media ctrl base addr */
+#define REG_MEDIA_SC_IOSIZE PAGE_ALIGN(SZ_4K)
+#define SOC_PMCTRL_BASE_ADDR 0xF7032000 /* pm ctrl base addr */
+#define REG_PMCTRL_IOSIZE PAGE_ALIGN(SZ_4K)
+#define SOC_AO_SCTRL_BASE_ADDR 0xF7800000 /* ao ctrl base addr */
+#define SOC_PERI_SCTRL_BASE_ADDR 0xF7030000 /* peri ctrl base addr */
+#define REG_SC_ON_IOSIZE PAGE_ALIGN(SZ_8K)
+#define REG_SC_OFF_IOSIZE PAGE_ALIGN(SZ_4K)
+
+/* ----------------------------------------------------------------------------
+ * MEDIA SCTRL
+ */
+
+#define SOC_MEDIA_SCTRL_SC_MEDIA_SUBSYS_CTRL5_ADDR(base) ((base) + (0x51C))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG0_ADDR(base) ((base) + (0xCBC))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(base) ((base) + (0xCC4))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKEN_ADDR(base) ((base) + (0x520))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKDIS_ADDR(base) ((base) + (0x524))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RSTEN_ADDR(base) ((base) + (0x52C))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RSTDIS_ADDR(base) ((base) + (0x530))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RST_STAT_ADDR(base) ((base) + (0x534))
+
+/* ----------------------------------------------------------------------------
+ * AO SCTRL; only bit 1 is needed for the GPU.
+ */
+
+#define SOC_AO_SCTRL_SC_PW_CLKEN0_ADDR(base) ((base) + (0x800))
+#define SOC_AO_SCTRL_SC_PW_CLKDIS0_ADDR(base) ((base) + (0x804))
+#define SOC_AO_SCTRL_SC_PW_CLK_STAT0_ADDR(base) ((base) + (0x808))
+#define SOC_AO_SCTRL_SC_PW_RSTEN0_ADDR(base) ((base) + (0x810))
+#define SOC_AO_SCTRL_SC_PW_RSTDIS0_ADDR(base) ((base) + (0x814))
+#define SOC_AO_SCTRL_SC_PW_RST_STAT0_ADDR(base) ((base) + (0x818))
+#define SOC_AO_SCTRL_SC_PW_ISOEN0_ADDR(base) ((base) + (0x820))
+#define SOC_AO_SCTRL_SC_PW_ISODIS0_ADDR(base) ((base) + (0x824))
+#define SOC_AO_SCTRL_SC_PW_ISO_STAT0_ADDR(base) ((base) + (0x828))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_EN0_ADDR(base) ((base) + (0x830))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_DIS0_ADDR(base) ((base) + (0x834))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_STAT0_ADDR(base) ((base) + (0x838))
+
+/* ----------------------------------------------------------------------------
+ * PERI SCTRL; only bit 10 is needed for the GPU.
+ */
+
+#define SOC_PERI_SCTRL_SC_PERIPH_CLKEN12_ADDR(base) ((base) + (0x270))
+#define SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(base) ((base) + (0x278))
+
+#endif /* MALI_HIKEY_HI6220_REGISTERS_GPU_H */
diff --git a/drivers/gpu/arm/utgard/readme.txt b/drivers/gpu/arm/utgard/readme.txt
new file mode 100644
index 000000000000..6785ac933b38
--- /dev/null
+++ b/drivers/gpu/arm/utgard/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ ump_option: 1 = enable UMP support (*)
+ 0 = disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
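+
+For example (paths and options are illustrative; adjust them for your tree):
+
+    KDIR=/path/to/linux USING_UMP=0 BUILD=release make
+    insmod mali.ko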
+
+Use of UMP is not recommended. The dma-buf API in the Linux kernel has
+replaced UMP. The Mali Device Driver will be built with dma-buf support if
+dma-buf is enabled in the kernel configuration.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
diff --git a/drivers/gpu/arm/utgard/regs/mali_200_regs.h b/drivers/gpu/arm/utgard/regs/mali_200_regs.h
new file mode 100644
index 000000000000..0345fb169a95
--- /dev/null
+++ b/drivers/gpu/arm/utgard/regs/mali_200_regs.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010, 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg {
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x1088,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_ADDR_MGMT_PERFMON_CONTR = 0x10b0,
+ MALI200_REG_ADDR_MGMT_PERFMON_BASE = 0x10b4,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1 << 0),
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1 << 3),
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1 << 5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1 << 6),
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1 << 7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1 << 0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1 << 1),
+ MALI200_REG_VAL_IRQ_HANG = (1 << 2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1 << 3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1 << 4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1 << 5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1 << 6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1 << 7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1 << 8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1 << 10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1 << 11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1 << 12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1 << 0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1 << 4),
+};
+
+enum mali200_render_unit {
+ MALI200_REG_ADDR_FRAME = 0x0000,
+ MALI200_REG_ADDR_RSW = 0x0004,
+ MALI200_REG_ADDR_STACK = 0x0030,
+ MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+ MALI200_REG_ADDR_ORIGIN_OFFSET_X = 0x0040
+};
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+ MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+ MALI200_REG_ADDR_WB_SOURCE_ADDR = 0x0004,
+};
+
+/* This should be in the top 16 bits of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+#define MALI470_PP_PRODUCT_ID 0xCF08
+
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/arm/utgard/regs/mali_gp_regs.h b/drivers/gpu/arm/utgard/regs/mali_gp_regs.h
new file mode 100644
index 000000000000..7f8b58fd6c49
--- /dev/null
+++ b/drivers/gpu/arm/utgard/regs/mali_gp_regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x54,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum {
+ MALIGP2_REG_VAL_CMD_START_VS = (1 << 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1 << 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1 << 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1 << 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1 << 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1 << 9),
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1 << 10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask selecting none of the IRQs on Mali GP2 */
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} */ /* end of defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} */ /* end of defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bits of the version register of the GP. */
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different performance counter sources that can be instrumented on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
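A minimal sketch of how the GP definitions above fit together (illustrative only, not part of the patch; the ioremapped register base 'gp_base' and the plain writel() access are assumptions, not the driver's actual MMIO helpers):

#include <linux/io.h>

/* Unmask the IRQs the driver handles, then start the vertex shader
 * command list. */
static void maligp2_start_vs(void __iomem *gp_base)
{
	writel(MALIGP2_REG_VAL_IRQ_MASK_USED,
	       gp_base + MALIGP2_REG_ADDR_MGMT_INT_MASK);
	writel(MALIGP2_REG_VAL_CMD_START_VS,
	       gp_base + MALIGP2_REG_ADDR_MGMT_CMD);
}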
diff --git a/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 000000000000..7df934c12122
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 000000000000..f52097c1901b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__("MCR p15, 0, %0, c15, c12, 0" : : "r"(mask));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__("MRC p15, 0, %0, c15, c12, 1" : "=r"(result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
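A hedged usage sketch of the two helpers above (illustrative only, not part of the patch; note the ARM11 cycle counter is 32 bits wide, so long intervals can wrap):

/* Measure roughly how many CPU cycles a callback takes. */
static u64 mali_time_callback_cycles(void (*fn)(void))
{
	u64 start, end;

	_mali_timestamp_reset();       /* zero the cycle counter */
	start = _mali_timestamp_get();
	fn();
	end = _mali_timestamp_get();

	return end - start;            /* cycle delta, may wrap after 2^32 cycles */
}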
diff --git a/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c
new file mode 100644
index 000000000000..7df934c12122
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h
new file mode 100644
index 000000000000..709a16a82f31
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_boot_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/arm_gpu/Kbuild b/drivers/gpu/arm_gpu/Kbuild
new file mode 100644
index 000000000000..bac37b7bd615
--- /dev/null
+++ b/drivers/gpu/arm_gpu/Kbuild
@@ -0,0 +1,224 @@
+#
+# (C) COPYRIGHT 2012-2016, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+# Driver version string which is returned to userspace via an ioctl
+MALI_RELEASE_NAME ?= "r7p0-01rel0"
+
+# Paths required for build
+KBASE_PATH = $(src)
+KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
+UMP_PATH = $(src)/../../../base
+
+ifeq ($(CONFIG_MALI_ERROR_INJECTION),y)
+MALI_ERROR_INJECT_ON = 1
+endif
+
+# Set up defaults if not defined by build system
+MALI_CUSTOMER_RELEASE ?= 1
+MALI_UNIT_TEST ?= 0
+MALI_KERNEL_TEST_API ?= 0
+MALI_ERROR_INJECT_ON ?= 0
+MALI_MOCK_TEST ?= 0
+MALI_COVERAGE ?= 0
+MALI_INSTRUMENTATION_LEVEL ?= 0
+# This workaround is for what seems to be a compiler bug we observed in
+# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
+# the "_Pragma" syntax, where an error message is returned:
+#
+# "internal compiler error: unspellable token PRAGMA"
+#
+# This regression has thus far only been seen on the GCC 4.7 compiler bundled
+# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds
+# which are not known to be used with AOSP, is hardcoded to disable the
+# workaround, i.e. set the define to 0.
+MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0
+
+# Set up our defines, which will be passed to gcc
+DEFINES = \
+ -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+ -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+ -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+ -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
+ -DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
+ -DMALI_COVERAGE=$(MALI_COVERAGE) \
+ -DMALI_INSTRUMENTATION_LEVEL=$(MALI_INSTRUMENTATION_LEVEL) \
+ -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
+ -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
+
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+else
+# out-of-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+endif
+
+DEFINES += -I$(srctree)/drivers/staging/android
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+
+SRC := \
+ mali_kbase_device.c \
+ mali_kbase_cache_policy.c \
+ mali_kbase_mem.c \
+ mali_kbase_mmu.c \
+ mali_kbase_ctx_sched.c \
+ mali_kbase_jd.c \
+ mali_kbase_jd_debugfs.c \
+ mali_kbase_jm.c \
+ mali_kbase_gpuprops.c \
+ mali_kbase_js.c \
+ mali_kbase_js_ctx_attr.c \
+ mali_kbase_event.c \
+ mali_kbase_context.c \
+ mali_kbase_pm.c \
+ mali_kbase_config.c \
+ mali_kbase_vinstr.c \
+ mali_kbase_softjobs.c \
+ mali_kbase_10969_workaround.c \
+ mali_kbase_hw.c \
+ mali_kbase_utility.c \
+ mali_kbase_debug.c \
+ mali_kbase_trace_timeline.c \
+ mali_kbase_gpu_memory_debugfs.c \
+ mali_kbase_mem_linux.c \
+ mali_kbase_core_linux.c \
+ mali_kbase_replay.c \
+ mali_kbase_mem_profile_debugfs.c \
+ mali_kbase_mmu_mode_lpae.c \
+ mali_kbase_mmu_mode_aarch64.c \
+ mali_kbase_disjoint_events.c \
+ mali_kbase_gator_api.c \
+ mali_kbase_debug_mem_view.c \
+ mali_kbase_debug_job_fault.c \
+ mali_kbase_smc.c \
+ mali_kbase_mem_pool.c \
+ mali_kbase_mem_pool_debugfs.c \
+ mali_kbase_tlstream.c \
+ mali_kbase_strings.c \
+ mali_kbase_as_fault_debugfs.c \
+ mali_kbase_regs_history_debugfs.c
+
+
+
+
+ifeq ($(MALI_UNIT_TEST),1)
+ SRC += mali_kbase_tlstream_test.c
+endif
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+ SRC += mali_kbase_regs_dump_debugfs.c
+endif
+
+
+ccflags-y += -I$(KBASE_PATH)
+
+ifeq ($(CONFIG_MALI_PLATFORM_FAKE),y)
+ SRC += mali_kbase_platform_fake.c
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS),y)
+ SRC += platform/vexpress/mali_kbase_config_vexpress.c \
+ platform/vexpress/mali_kbase_cpu_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_RTSM_VE),y)
+ SRC += platform/rtsm_ve/mali_kbase_config_vexpress.c
+ ccflags-y += -I$(src)/platform/rtsm_ve
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_1XV7_A57),y)
+ SRC += platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress_1xv7_a57
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_6XVIRTEX7_10MHZ),y)
+ SRC += platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c \
+ platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress_6xvirtex7_10mhz
+ endif
+endif # CONFIG_MALI_PLATFORM_FAKE=y
+
+
+ifeq ($(CONFIG_MALI_PLATFORM_HISILICON),y)
+ SRC += platform/hisilicon/mali_kbase_config_hisilicon.c
+ ccflags-y += -I$(src)/platform/hisilicon
+endif
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
+
+# Tell the Linux build system to enable building of our .c files
+mali_kbase-y := $(SRC:.c=.o)
+
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
+ # Kconfig passes in the name with quotes for in-tree builds - remove them.
+ platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
+ MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+ ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)
+ include $(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)/Kbuild
+endif
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+ ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
+ include $(src)/ipa/Kbuild
+ endif
+endif
+
+mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
+ mali_kbase_dma_fence.o \
+ mali_kbase_fence.o
+mali_kbase-$(CONFIG_SYNC) += \
+ mali_kbase_sync_android.o \
+ mali_kbase_sync_common.o
+mali_kbase-$(CONFIG_SYNC_FILE) += \
+ mali_kbase_sync_file.o \
+ mali_kbase_sync_common.o \
+ mali_kbase_fence.o
+
+MALI_BACKEND_PATH ?= backend
+CONFIG_MALI_BACKEND ?= gpu
+CONFIG_MALI_BACKEND_REAL ?= $(CONFIG_MALI_BACKEND)
+
+ifeq ($(MALI_MOCK_TEST),1)
+ifeq ($(CONFIG_MALI_BACKEND_REAL),gpu)
+# Test functionality
+mali_kbase-y += tests/internal/src/mock/mali_kbase_pm_driver_mock.o
+endif
+endif
+
+include $(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)/Kbuild
+mali_kbase-y += $(BACKEND:.c=.o)
+
+
+ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)
+subdir-ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)
+
+# Default to the devicetree platform if neither a fake platform nor a
+# thirdparty platform is configured.
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY)$(CONFIG_MALI_PLATFORM_FAKE),)
+CONFIG_MALI_PLATFORM_DEVICETREE := y
+endif
+
+mali_kbase-$(CONFIG_MALI_PLATFORM_DEVICETREE) += \
+ platform/devicetree/mali_kbase_runtime_pm.o \
+ platform/devicetree/mali_kbase_config_devicetree.o
+ccflags-$(CONFIG_MALI_PLATFORM_DEVICETREE) += -I$(src)/platform/devicetree
+
+# For kutf and mali_kutf_irq_latency_test
+obj-$(CONFIG_MALI_KUTF) += tests/
diff --git a/drivers/gpu/arm_gpu/Kconfig b/drivers/gpu/arm_gpu/Kconfig
new file mode 100644
index 000000000000..55325bacada8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/Kconfig
@@ -0,0 +1,263 @@
+#
+# (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+menuconfig MALI_MIDGARD
+ tristate "Mali Midgard series support"
+ select GPU_TRACEPOINTS if ANDROID
+ default n
+ help
+ Enable this option to build support for an ARM Mali Midgard GPU.
+
+ To compile this driver as a module, choose M here:
+ this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+ bool "Streamline support via Gator"
+ depends on MALI_MIDGARD
+ default n
+ help
+ Adds diagnostic support for use with the ARM Streamline Performance Analyzer.
+ The Gator device driver must already be loaded before this driver when
+ Streamline debug support is enabled.
+ This is a legacy interface required by older versions of Streamline.
+
+config MALI_MIDGARD_DVFS
+ bool "Enable legacy DVFS"
+ depends on MALI_MIDGARD && !MALI_DEVFREQ && !MALI_PLATFORM_DEVICETREE
+ default n
+ help
+ Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+ bool "Enable kbase tracing"
+ depends on MALI_MIDGARD
+ default n
+ help
+ Enables tracing in kbase. The trace log is available through
+ the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+ bool "devfreq support for Mali"
+ depends on MALI_MIDGARD && PM_DEVFREQ
+ help
+ Support devfreq for Mali.
+
+ Using the devfreq framework and, by default, the simpleondemand
+ governor, the frequency of Mali will be dynamically selected from the
+ available OPPs.
+
+config MALI_DMA_FENCE
+ bool "DMA_BUF fence support for Mali"
+ depends on MALI_MIDGARD && !KDS
+ default n
+ help
+ Support DMA_BUF fences for Mali.
+
+ This option should only be enabled if KDS is not present and
+ the Linux kernel has built-in support for DMA_BUF fences.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+ depends on MALI_MIDGARD
+ bool "Enable Expert Settings"
+ default n
+ help
+ Enabling this option and modifying the default settings may produce a driver with performance or
+ other limitations.
+
+config MALI_CORESTACK
+ bool "Support controlling power to the GPU core stack"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Enabling this feature on supported GPUs will let the driver power
+ the GPU core stack on and off independently, without involving the
+ Power Domain Controller. This should only be enabled on platforms
+ where integration of the PDC with the Mali GPU is known to be
+ problematic. This feature is currently only supported on t-Six and
+ t-HEx GPUs.
+
+ If unsure, say N.
+
+config MALI_PRFCNT_SET_SECONDARY
+ bool "Use secondary set of performance counters"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Select this option to use the secondary set of performance counters. Kernel
+ features that depend on access to the primary set of counters may
+ become unavailable. Enabling this option will prevent power management
+ from working optimally and may cause instrumentation tools to return
+ bogus results.
+
+ If unsure, say N.
+
+config MALI_PLATFORM_FAKE
+ bool "Enable fake platform device support"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ When you start to work with the Mali Midgard series device driver, the platform-specific code of
+ the Linux kernel for your platform may not be complete. In this situation the kernel device driver
+ supports creating the platform device outside of the Linux platform-specific code.
+ Enable this option if you would like to use a platform device configuration from within the device driver.
+
+choice
+ prompt "Platform configuration"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default MALI_PLATFORM_DEVICETREE
+ help
+ Select the SoC platform that contains a Mali Midgard GPU.
+
+config MALI_PLATFORM_DEVICETREE
+ bool "Device Tree platform"
+ depends on OF
+ help
+ Select this option to use Device Tree with the Mali driver.
+
+ When using this option the Mali driver will get the details of the
+ GPU hardware from the Device Tree. This means that the same driver
+ binary can run on multiple platforms as long as all the GPU hardware
+ details are described in the device tree.
+
+ Device Tree is the recommended method for the Mali driver platform
+ integration.
+
+config MALI_PLATFORM_VEXPRESS
+ depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4)
+ bool "Versatile Express"
+config MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ
+ depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4)
+ bool "Versatile Express w/Virtex7 @ 40Mhz"
+config MALI_PLATFORM_GOLDFISH
+ depends on ARCH_GOLDFISH
+ bool "Android Goldfish virtual CPU"
+config MALI_PLATFORM_PBX
+ depends on ARCH_REALVIEW && REALVIEW_EB_A9MP && MACH_REALVIEW_PBX
+ bool "Realview PBX-A9"
+config MALI_PLATFORM_HISILICON
+ bool "Hisilicon"
+ depends on MALI_MIDGARD && MALI_EXPERT
+
+config MALI_PLATFORM_THIRDPARTY
+ bool "Third Party Platform"
+endchoice
+
+config MALI_PLATFORM_THIRDPARTY_NAME
+ depends on MALI_MIDGARD && MALI_PLATFORM_THIRDPARTY && MALI_EXPERT
+ string "Third party platform name"
+ help
+ Enter the name of a third party platform that is supported. The third party configuration
+ file must be in midgard/config/tpip/mali_kbase_config_xxx.c where xxx is the name
+ specified here.
+
+config MALI_DEBUG
+ bool "Debug build"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+ bool "Debug sync fence usage"
+ depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE)
+ default y if MALI_DEBUG
+ help
+ Select this option to enable additional checking and reporting on the
+ use of sync fences in the Mali driver.
+
+ This will add a 3s timeout to all sync fence waits in the Mali
+ driver, so that when work for Mali has been waiting on a sync fence
+ for a long time a debug message will be printed, detailing what fence
+ is causing the block, and which dependent Mali atoms are blocked as a
+ result of this.
+
+ The timeout can be changed at runtime through the js_soft_timeout
+ device attribute, where the timeout is specified in milliseconds.
+
+config MALI_NO_MALI
+ bool "No Mali"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ This can be used to test the driver in a simulated environment
+ whereby the hardware is not physically present. If the hardware is physically
+ present it will not be used. This can be used to test the majority of the
+ driver without needing actual hardware or for software benchmarking.
+ All calls to the simulated hardware will complete immediately as if the hardware
+ completed the task.
+
+config MALI_ERROR_INJECT
+ bool "Error injection"
+ depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
+ default n
+ help
+ Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_TRACE_TIMELINE
+ bool "Timeline tracing"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Enables timeline tracing through the kernel tracepoint system.
+
+config MALI_SYSTEM_TRACE
+ bool "Enable system event tracing support"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Choose this option to enable system trace events for each
+ kbase event. This is typically used for debugging but has
+ minimal overhead when not in use. Enable only if you know what
+ you are doing.
+
+config MALI_GPU_MMU_AARCH64
+ bool "Use AArch64 page tables"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Use AArch64 format page tables for the GPU instead of LPAE-style.
+ The two formats have the same functionality and performance but a
+ future GPU may deprecate or remove the legacy LPAE-style format.
+
+ The LPAE-style format is supported on all Midgard and current Bifrost
+ GPUs. Enabling AArch64 format restricts the driver to only supporting
+ Bifrost GPUs.
+
+ If in doubt, say N.
+
+config MALI_2MB_ALLOC
+ bool "Attempt to allocate 2MB pages"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Rather than allocating all GPU memory page-by-page, attempt to
+ allocate 2MB pages from the kernel. This reduces TLB pressure and
+ helps to prevent memory fragmentation.
+
+ If in doubt, say N
+
+config MALI_PWRSOFT_765
+ bool "PWRSOFT-765 ticket"
+ default n
+ help
+ PWRSOFT-765 fixes issues with devfreq cooling devices. However, the
+ fixes are not yet merged in the mainline kernel, so this option guards
+ those parts of the code.
+
+source "drivers/gpu/arm_gpu/platform/Kconfig"
+#source "drivers/gpu/arm/midgard/platform/Kconfig"
+#source "drivers/gpu/arm/midgard/tests/Kconfig"
diff --git a/drivers/gpu/arm_gpu/Makefile b/drivers/gpu/arm_gpu/Makefile
new file mode 100644
index 000000000000..9aa242c4f8c4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/Makefile
@@ -0,0 +1,42 @@
+#
+# (C) COPYRIGHT 2010-2016, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
+UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump
+KBASE_PATH_RELATIVE = $(CURDIR)
+KDS_PATH_RELATIVE = $(CURDIR)/../../../..
+EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
+
+ifeq ($(MALI_UNIT_TEST), 1)
+ EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers
+endif
+
+ifeq ($(MALI_BUS_LOG), 1)
+#Add bus logger symbols
+EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
+endif
+
+# GPL driver supports KDS
+EXTRA_SYMBOLS += $(KDS_PATH_RELATIVE)/drivers/base/kds/Module.symvers
+
+# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/gpu/arm_gpu/Makefile.kbase b/drivers/gpu/arm_gpu/Makefile.kbase
new file mode 100644
index 000000000000..2bef9c25eaeb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/Makefile.kbase
@@ -0,0 +1,17 @@
+#
+# (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM)
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/Kbuild b/drivers/gpu/arm_gpu/backend/gpu/Kbuild
new file mode 100644
index 000000000000..5f700e9b6b44
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/Kbuild
@@ -0,0 +1,60 @@
+#
+# (C) COPYRIGHT 2014,2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+BACKEND += \
+ backend/gpu/mali_kbase_cache_policy_backend.c \
+ backend/gpu/mali_kbase_device_hw.c \
+ backend/gpu/mali_kbase_gpu.c \
+ backend/gpu/mali_kbase_gpuprops_backend.c \
+ backend/gpu/mali_kbase_debug_job_fault_backend.c \
+ backend/gpu/mali_kbase_irq_linux.c \
+ backend/gpu/mali_kbase_instr_backend.c \
+ backend/gpu/mali_kbase_jm_as.c \
+ backend/gpu/mali_kbase_jm_hw.c \
+ backend/gpu/mali_kbase_jm_rb.c \
+ backend/gpu/mali_kbase_js_affinity.c \
+ backend/gpu/mali_kbase_js_backend.c \
+ backend/gpu/mali_kbase_mmu_hw_direct.c \
+ backend/gpu/mali_kbase_pm_backend.c \
+ backend/gpu/mali_kbase_pm_driver.c \
+ backend/gpu/mali_kbase_pm_metrics.c \
+ backend/gpu/mali_kbase_pm_ca.c \
+ backend/gpu/mali_kbase_pm_ca_fixed.c \
+ backend/gpu/mali_kbase_pm_always_on.c \
+ backend/gpu/mali_kbase_pm_coarse_demand.c \
+ backend/gpu/mali_kbase_pm_demand.c \
+ backend/gpu/mali_kbase_pm_policy.c \
+ backend/gpu/mali_kbase_time.c
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+BACKEND += \
+ backend/gpu/mali_kbase_pm_ca_random.c \
+ backend/gpu/mali_kbase_pm_demand_always_powered.c \
+ backend/gpu/mali_kbase_pm_fast_start.c
+endif
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+BACKEND += \
+ backend/gpu/mali_kbase_devfreq.c \
+ backend/gpu/mali_kbase_pm_ca_devfreq.c
+endif
+
+ifeq ($(CONFIG_MALI_NO_MALI),y)
+ # Dummy model
+ BACKEND += backend/gpu/mali_kbase_model_dummy.c
+ BACKEND += backend/gpu/mali_kbase_model_linux.c
+ # HW error simulation
+ BACKEND += backend/gpu/mali_kbase_model_error_generator.c
+endif
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_backend_config.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_backend_config.h
new file mode 100644
index 000000000000..c8ae87eb84a2
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_backend_config.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend specific configuration
+ */
+
+#ifndef _KBASE_BACKEND_CONFIG_H_
+#define _KBASE_BACKEND_CONFIG_H_
+
+/* Enable GPU reset API */
+#define KBASE_GPU_RESET_EN 1
+
+#endif /* _KBASE_BACKEND_CONFIG_H_ */
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.c
new file mode 100644
index 000000000000..fef9a2cb743e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "backend/gpu/mali_kbase_cache_policy_backend.h"
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+ u32 mode)
+{
+ kbdev->current_gpu_coherency_mode = mode;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
+ kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL);
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.h
new file mode 100644
index 000000000000..fe9869109a82
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_cache_policy_backend.h
@@ -0,0 +1,34 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#ifndef _KBASE_CACHE_POLICY_BACKEND_H_
+#define _KBASE_CACHE_POLICY_BACKEND_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_set_coherency_mode() - Sets the system coherency mode
+ * in the GPU.
+ * @kbdev: Device pointer
+ * @mode: Coherency mode. COHERENCY_ACE/ACE_LITE
+ */
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+ u32 mode);
+
+#endif /* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_debug_job_fault_backend.c
new file mode 100644
index 000000000000..7851ea6466c7
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -0,0 +1,157 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_debug_job_fault.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*GPU_CONTROL_REG(r)*/
+static int gpu_control_reg_snapshot[] = {
+ GPU_ID,
+ SHADER_READY_LO,
+ SHADER_READY_HI,
+ TILER_READY_LO,
+ TILER_READY_HI,
+ L2_READY_LO,
+ L2_READY_HI
+};
+
+/* JOB_CONTROL_REG(r) */
+static int job_control_reg_snapshot[] = {
+ JOB_IRQ_MASK,
+ JOB_IRQ_STATUS
+};
+
+/* JOB_SLOT_REG(n,r) */
+static int job_slot_reg_snapshot[] = {
+ JS_HEAD_LO,
+ JS_HEAD_HI,
+ JS_TAIL_LO,
+ JS_TAIL_HI,
+ JS_AFFINITY_LO,
+ JS_AFFINITY_HI,
+ JS_CONFIG,
+ JS_STATUS,
+ JS_HEAD_NEXT_LO,
+ JS_HEAD_NEXT_HI,
+ JS_AFFINITY_NEXT_LO,
+ JS_AFFINITY_NEXT_HI,
+ JS_CONFIG_NEXT
+};
+
+/*MMU_REG(r)*/
+static int mmu_reg_snapshot[] = {
+ MMU_IRQ_MASK,
+ MMU_IRQ_STATUS
+};
+
+/* MMU_AS_REG(n,r) */
+static int as_reg_snapshot[] = {
+ AS_TRANSTAB_LO,
+ AS_TRANSTAB_HI,
+ AS_MEMATTR_LO,
+ AS_MEMATTR_HI,
+ AS_FAULTSTATUS,
+ AS_FAULTADDRESS_LO,
+ AS_FAULTADDRESS_HI,
+ AS_STATUS
+};
+
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+ int reg_range)
+{
+ int i, j;
+ int offset = 0;
+ int slot_number;
+ int as_number;
+
+ if (kctx->reg_dump == NULL)
+ return false;
+
+ slot_number = kctx->kbdev->gpu_props.num_job_slots;
+ as_number = kctx->kbdev->gpu_props.num_address_spaces;
+
+ /* get the GPU control registers*/
+ for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Job control registers*/
+ for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ JOB_CONTROL_REG(job_control_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Job Slot registers*/
+ for (j = 0; j < slot_number; j++) {
+ for (i = 0; i < sizeof(job_slot_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
+ offset += 2;
+ }
+ }
+
+ /* get the MMU registers*/
+ for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Address space registers*/
+ for (j = 0; j < as_number; j++) {
+ for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ MMU_AS_REG(j, as_reg_snapshot[i]);
+ offset += 2;
+ }
+ }
+
+ WARN_ON(offset >= (reg_range*2/4));
+
+ /* set the termination flag*/
+ kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
+ kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;
+
+ dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n",
+ offset);
+
+ return true;
+}
+
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
+{
+ int offset = 0;
+
+ if (kctx->reg_dump == NULL)
+ return false;
+
+ while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
+ kctx->reg_dump[offset+1] =
+ kbase_reg_read(kctx->kbdev,
+ kctx->reg_dump[offset], NULL);
+ offset += 2;
+ }
+ return true;
+}
+
+
+#endif
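For clarity, the snapshot produced above stores each register address in an even slot and its value in the following odd slot, with both slots of the final pair set to REGISTER_DUMP_TERMINATION_FLAG. A hedged sketch of walking a completed dump (illustrative only, not part of the patch):

/* Print every address/value pair captured in a completed snapshot. */
static void print_reg_snapshot(struct kbase_context *kctx)
{
	int offset = 0;

	while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
		dev_info(kctx->kbdev->dev, "reg 0x%x = 0x%x\n",
			 kctx->reg_dump[offset], kctx->reg_dump[offset + 1]);
		offset += 2;
	}
}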
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.c
new file mode 100644
index 000000000000..e280322e223d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.c
@@ -0,0 +1,413 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#define dev_pm_opp_find_freq_floor opp_find_freq_floor
+#endif /* Linux >= 3.13 */
+
+/**
+ * opp_translate - Translate nominal OPP frequency from devicetree into real
+ * frequency and core mask
+ * @kbdev: Device pointer
+ * @freq: Nominal frequency
+ * @core_mask: Pointer to u64 to store core mask to
+ *
+ * Return: Real target frequency
+ *
+ * This function will only perform translation if an operating-points-v2-mali
+ * table is present in devicetree. If one is not present then it will return an
+ * untranslated frequency and all cores enabled.
+ */
+static unsigned long opp_translate(struct kbase_device *kbdev,
+ unsigned long freq, u64 *core_mask)
+{
+ int i;
+
+ for (i = 0; i < kbdev->num_opps; i++) {
+ if (kbdev->opp_table[i].opp_freq == freq) {
+ *core_mask = kbdev->opp_table[i].core_mask;
+ return kbdev->opp_table[i].real_freq;
+ }
+ }
+
+ /* Failed to find OPP - return all cores enabled & nominal frequency */
+ *core_mask = kbdev->gpu_props.props.raw_props.shader_present;
+
+ return freq;
+}
+
+static int
+kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long nominal_freq;
+ unsigned long freq = 0;
+ unsigned long voltage;
+ int err;
+ u64 core_mask;
+
+ freq = *target_freq;
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, &freq, flags);
+ if (IS_ERR_OR_NULL(opp)) {
+ rcu_read_unlock();
+ dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+ /* Only read the voltage once the OPP lookup has been checked. */
+ voltage = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ nominal_freq = freq;
+
+ /*
+ * Only update if there is a change of frequency
+ */
+ if (kbdev->current_nominal_freq == nominal_freq) {
+ *target_freq = nominal_freq;
+ return 0;
+ }
+
+ freq = opp_translate(kbdev, nominal_freq, &core_mask);
+#ifdef CONFIG_REGULATOR
+ if (kbdev->regulator && kbdev->current_voltage != voltage
+ && kbdev->current_freq < freq) {
+ err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+ if (err) {
+ dev_err(dev, "Failed to increase voltage (%d)\n", err);
+ return err;
+ }
+ }
+#endif
+
+ err = clk_set_rate(kbdev->clock, freq);
+ if (err) {
+ dev_err(dev, "Failed to set clock %lu (target %lu)\n",
+ freq, *target_freq);
+ return err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (kbdev->regulator && kbdev->current_voltage != voltage
+ && kbdev->current_freq > freq) {
+ err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+ if (err) {
+ dev_err(dev, "Failed to decrease voltage (%d)\n", err);
+ return err;
+ }
+ }
+#endif
+
+ if (kbdev->pm.backend.ca_current_policy->id ==
+ KBASE_PM_CA_POLICY_ID_DEVFREQ)
+ kbase_devfreq_set_core_mask(kbdev, core_mask);
+
+ *target_freq = nominal_freq;
+ kbdev->current_voltage = voltage;
+ kbdev->current_nominal_freq = nominal_freq;
+ kbdev->current_freq = freq;
+ kbdev->current_core_mask = core_mask;
+
+ KBASE_TLSTREAM_AUX_DEVFREQ_TARGET((u64)nominal_freq);
+
+ kbase_pm_reset_dvfs_utilisation(kbdev);
+
+ return err;
+}
+
+static int
+kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ *freq = kbdev->current_nominal_freq;
+
+ return 0;
+}
+
+static int
+kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ stat->current_frequency = kbdev->current_nominal_freq;
+
+ kbase_pm_get_dvfs_utilisation(kbdev,
+ &stat->total_time, &stat->busy_time);
+
+ stat->private_data = NULL;
+
+ return 0;
+}
+
+static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+ struct devfreq_dev_profile *dp)
+{
+ int count;
+ int i = 0;
+ unsigned long freq;
+ struct dev_pm_opp *opp;
+
+ rcu_read_lock();
+ count = dev_pm_opp_get_opp_count(kbdev->dev);
+ if (count < 0) {
+ rcu_read_unlock();
+ return count;
+ }
+ rcu_read_unlock();
+
+ dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+ GFP_KERNEL);
+ if (!dp->freq_table)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
+ opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dp->freq_table[i] = freq;
+ }
+ rcu_read_unlock();
+
+ if (count != i)
+ dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d != %d)\n",
+ count, i);
+
+ dp->max_state = i;
+
+ return 0;
+}
+
+static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+{
+ struct devfreq_dev_profile *dp = kbdev->devfreq->profile;
+
+ kfree(dp->freq_table);
+}
+
+static void kbase_devfreq_exit(struct device *dev)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ kbase_devfreq_term_freq_table(kbdev);
+}
+
+static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+{
+ struct device_node *opp_node = of_parse_phandle(kbdev->dev->of_node,
+ "operating-points-v2", 0);
+ struct device_node *node;
+ int i = 0;
+ int count;
+
+ if (!opp_node)
+ return 0;
+ if (!of_device_is_compatible(opp_node, "operating-points-v2-mali"))
+ return 0;
+
+ count = dev_pm_opp_get_opp_count(kbdev->dev);
+ kbdev->opp_table = kmalloc_array(count,
+ sizeof(struct kbase_devfreq_opp), GFP_KERNEL);
+ if (!kbdev->opp_table)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(opp_node, node) {
+ u64 core_mask;
+ u64 opp_freq, real_freq;
+ const void *core_count_p;
+
+ if (of_property_read_u64(node, "opp-hz", &opp_freq)) {
+ dev_warn(kbdev->dev, "OPP is missing required opp-hz property\n");
+ continue;
+ }
+ if (of_property_read_u64(node, "opp-hz-real", &real_freq))
+ real_freq = opp_freq;
+ if (of_property_read_u64(node, "opp-core-mask", &core_mask))
+ core_mask =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ core_count_p = of_get_property(node, "opp-core-count", NULL);
+ if (core_count_p) {
+ u64 remaining_core_mask =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ int core_count = be32_to_cpup(core_count_p);
+
+ core_mask = 0;
+
+ for (; core_count > 0; core_count--) {
+ int core = ffs(remaining_core_mask);
+
+ if (!core) {
+ dev_err(kbdev->dev, "OPP has more cores than GPU\n");
+ return -ENODEV;
+ }
+
+ core_mask |= (1ull << (core-1));
+ remaining_core_mask &= ~(1ull << (core-1));
+ }
+ }
+
+ if (!core_mask) {
+ dev_err(kbdev->dev, "OPP has invalid core mask of 0\n");
+ return -ENODEV;
+ }
+
+ kbdev->opp_table[i].opp_freq = opp_freq;
+ kbdev->opp_table[i].real_freq = real_freq;
+ kbdev->opp_table[i].core_mask = core_mask;
+
+ dev_info(kbdev->dev, "OPP %d : opp_freq=%llu real_freq=%llu core_mask=%llx\n",
+ i, opp_freq, real_freq, core_mask);
+
+ i++;
+ }
+
+ kbdev->num_opps = i;
+
+ return 0;
+}
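A hedged worked example of the opp-core-count handling above (illustrative only, not part of the patch; the shader_present value 0xF and core count of 2 are made-up inputs):

/* Mirror the core-count loop above with example values. */
static u64 example_core_mask(void)
{
	u64 remaining = 0xF;   /* assumed shader_present */
	u64 core_mask = 0;
	int core_count = 2;    /* assumed "opp-core-count" */

	for (; core_count > 0; core_count--) {
		int core = ffs(remaining);   /* 1-based index of lowest set bit */

		core_mask |= 1ull << (core - 1);
		remaining &= ~(1ull << (core - 1));
	}

	return core_mask;      /* 0x3: the two lowest shader cores */
}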
+
+int kbase_devfreq_init(struct kbase_device *kbdev)
+{
+ struct devfreq_dev_profile *dp;
+ int err;
+
+ if (!kbdev->clock) {
+ dev_err(kbdev->dev, "Clock not available for devfreq\n");
+ return -ENODEV;
+ }
+
+ kbdev->current_freq = clk_get_rate(kbdev->clock);
+ kbdev->current_nominal_freq = kbdev->current_freq;
+
+ dp = &kbdev->devfreq_profile;
+
+ dp->initial_freq = kbdev->current_freq;
+ dp->polling_ms = 100;
+ dp->target = kbase_devfreq_target;
+ dp->get_dev_status = kbase_devfreq_status;
+ dp->get_cur_freq = kbase_devfreq_cur_freq;
+ dp->exit = kbase_devfreq_exit;
+
+ if (kbase_devfreq_init_freq_table(kbdev, dp))
+ return -EFAULT;
+
+ err = kbase_devfreq_init_core_mask_table(kbdev);
+ if (err)
+ return err;
+
+ kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
+ "simple_ondemand", NULL);
+ if (IS_ERR(kbdev->devfreq)) {
+ kbase_devfreq_term_freq_table(kbdev);
+ return PTR_ERR(kbdev->devfreq);
+ }
+
+ /* devfreq_add_device only copies a few of kbdev->dev's fields, so
+ * set drvdata explicitly so IPA models can access kbdev. */
+ dev_set_drvdata(&kbdev->devfreq->dev, kbdev);
+
+ err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Failed to register OPP notifier (%d)\n", err);
+ goto opp_notifier_failed;
+ }
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ err = kbase_ipa_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "IPA initialization failed\n");
+ goto cooling_failed;
+ }
+
+ kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
+ kbdev->dev->of_node,
+ kbdev->devfreq,
+ &kbase_ipa_power_model_ops);
+ if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
+ err = PTR_ERR(kbdev->devfreq_cooling);
+ dev_err(kbdev->dev,
+ "Failed to register cooling device (%d)\n",
+ err);
+ goto cooling_failed;
+ }
+#endif
+
+ return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+ devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+ if (devfreq_remove_device(kbdev->devfreq))
+ dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+ else
+ kbdev->devfreq = NULL;
+
+ return err;
+}
+
+void kbase_devfreq_term(struct kbase_device *kbdev)
+{
+ int err;
+
+ dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ if (kbdev->devfreq_cooling)
+ devfreq_cooling_unregister(kbdev->devfreq_cooling);
+
+ kbase_ipa_term(kbdev);
+#endif
+
+ devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+
+ err = devfreq_remove_device(kbdev->devfreq);
+ if (err)
+ dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+ else
+ kbdev->devfreq = NULL;
+
+ kfree(kbdev->opp_table);
+}
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.h
new file mode 100644
index 000000000000..c0bf8b15b3bc
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_devfreq.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _BASE_DEVFREQ_H_
+#define _BASE_DEVFREQ_H_
+
+int kbase_devfreq_init(struct kbase_device *kbdev);
+void kbase_devfreq_term(struct kbase_device *kbdev);
+
+#endif /* _BASE_DEVFREQ_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_hw.c
new file mode 100644
index 000000000000..dcdf15cdc3e8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_hw.c
@@ -0,0 +1,255 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ *
+ */
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+
+#ifdef CONFIG_DEBUG_FS
+
+
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
+{
+ struct kbase_io_access *old_buf;
+ struct kbase_io_access *new_buf;
+ unsigned long flags;
+
+ if (!new_size)
+ goto out_err; /* The new size must not be 0 */
+
+ new_buf = vmalloc(new_size * sizeof(*h->buf));
+ if (!new_buf)
+ goto out_err;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ old_buf = h->buf;
+
+ /* Note: we won't bother with copying the old data over. The dumping
+ * logic wouldn't work properly as it relies on 'count' both as a
+ * counter and as an index to the buffer which would have changed with
+ * the new array. This is a corner case that we don't need to support.
+ */
+ h->count = 0;
+ h->size = new_size;
+ h->buf = new_buf;
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ vfree(old_buf);
+
+ return 0;
+
+out_err:
+ return -1;
+}
+
+
+int kbase_io_history_init(struct kbase_io_history *h, u16 n)
+{
+ h->enabled = false;
+ spin_lock_init(&h->lock);
+ h->count = 0;
+ h->size = 0;
+ h->buf = NULL;
+ if (kbase_io_history_resize(h, n))
+ return -1;
+
+ return 0;
+}
+
+
+void kbase_io_history_term(struct kbase_io_history *h)
+{
+ vfree(h->buf);
+ h->buf = NULL;
+}
+
+
+/* kbase_io_history_add - add new entry to the register access history
+ *
+ * @h: Pointer to the history data structure
+ * @addr: Register address
+ * @value: The value that is either read from or written to the register
+ * @write: 1 if it's a register write, 0 if it's a read
+ */
+static void kbase_io_history_add(struct kbase_io_history *h,
+ void __iomem const *addr, u32 value, u8 write)
+{
+ struct kbase_io_access *io;
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ io = &h->buf[h->count % h->size];
+ io->addr = (uintptr_t)addr | write;
+ io->value = value;
+ ++h->count;
+ /* If count overflows, move the index by the buffer size so the entire
+ * buffer will still be dumped later */
+ if (unlikely(!h->count))
+ h->count = h->size;
+
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+void kbase_io_history_dump(struct kbase_device *kbdev)
+{
+ struct kbase_io_history *const h = &kbdev->io_history;
+ u16 i;
+ size_t iters;
+ unsigned long flags;
+
+ if (!unlikely(h->enabled))
+ return;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ dev_err(kbdev->dev, "Register IO History:");
+ iters = (h->size > h->count) ? h->count : h->size;
+ dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
+ h->count);
+ for (i = 0; i < iters; ++i) {
+ struct kbase_io_access *io =
+ &h->buf[(h->count - iters + i) % h->size];
+ char const access = (io->addr & 1) ? 'w' : 'r';
+
+ dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
+ (void *)(io->addr & ~0x1), io->value);
+ }
+
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
+ struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+ writel(value, kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+ if (unlikely(kbdev->io_history.enabled))
+ kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+ value, 1);
+#endif /* CONFIG_DEBUG_FS */
+ dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
+
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_WRITE, offset,
+ value);
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_write);
+
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
+ struct kbase_context *kctx)
+{
+ u32 val;
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+ val = readl(kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+ if (unlikely(kbdev->io_history.enabled))
+ kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+ val, 0);
+#endif /* CONFIG_DEBUG_FS */
+ dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
+
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_READ, offset, val);
+ return val;
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_read);
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
+
+/**
+ * kbase_report_gpu_fault - Report a GPU fault.
+ * @kbdev: Kbase device pointer
+ * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
+ * was also set
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using dev_warn().
+ */
+static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
+{
+ u32 status;
+ u64 address;
+
+ status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
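+ /* The 64-bit fault address is split across the HI/LO register pair */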
+ address = (u64) kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
+ address |= kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
+
+ dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+ status & 0xFF,
+ kbase_exception_name(kbdev, status),
+ address);
+ if (multiple)
+ dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+}
+
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
+{
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
+ if (val & GPU_FAULT)
+ kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);
+
+ if (val & RESET_COMPLETED)
+ kbase_pm_reset_done(kbdev);
+
+ if (val & PRFCNT_SAMPLE_COMPLETED)
+ kbase_instr_hwcnt_sample_done(kbdev);
+
+ if (val & CLEAN_CACHES_COMPLETED)
+ kbase_clean_caches_done(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);
+
+ /* kbase_pm_check_transitions must be called after the IRQ has been
+ * cleared. This is because it might trigger further power transitions
+ * and we don't want to miss the interrupt raised to notify us that
+ * these further transitions have finished.
+ */
+ if (val & POWER_CHANGED_ALL)
+ kbase_pm_power_changed(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
+}
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_internal.h
new file mode 100644
index 000000000000..5b20445932fb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_device_internal.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Backend-specific HW access device APIs
+ */
+
+#ifndef _KBASE_DEVICE_INTERNAL_H_
+#define _KBASE_DEVICE_INTERNAL_H_
+
+/**
+ * kbase_reg_write - write to GPU register
+ * @kbdev: Kbase device pointer
+ * @offset: Offset of register
+ * @value: Value to write
+ * @kctx: Kbase context pointer. May be NULL
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
+ * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
+ * != KBASEP_AS_NR_INVALID).
+ */
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_reg_read - read from GPU register
+ * @kbdev: Kbase device pointer
+ * @offset: Offset of register
+ * @kctx: Kbase context pointer. May be NULL
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
+ * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
+ * != KBASEP_AS_NR_INVALID).
+ *
+ * Return: Value in desired register
+ */
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
+ struct kbase_context *kctx);
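+/*
+ * Example (as used by the gpuprops backend):
+ *	regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
+ */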
+
+
+/**
+ * kbase_gpu_interrupt - GPU interrupt handler
+ * @kbdev: Kbase device pointer
+ * @val: The value of the GPU IRQ status register which triggered the call
+ *
+ * This function is called from the interrupt handler when a GPU irq is to be
+ * handled.
+ */
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
+
+#endif /* _KBASE_DEVICE_INTERNAL_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpu.c
new file mode 100644
index 000000000000..d578fd78e825
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpu.c
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend APIs
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_backend.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+int kbase_backend_early_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbasep_platform_device_init(kbdev);
+ if (err)
+ return err;
+
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ /* Find out GPU properties based on the GPU feature registers */
+ kbase_gpuprops_set(kbdev);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+
+ err = kbase_hwaccess_pm_init(kbdev);
+ if (err)
+ goto fail_pm;
+
+ err = kbase_install_interrupts(kbdev);
+ if (err)
+ goto fail_interrupts;
+
+ return 0;
+
+fail_interrupts:
+ kbase_hwaccess_pm_term(kbdev);
+fail_pm:
+ kbasep_platform_device_term(kbdev);
+
+ return err;
+}
+
+void kbase_backend_early_term(struct kbase_device *kbdev)
+{
+ kbase_release_interrupts(kbdev);
+ kbase_hwaccess_pm_term(kbdev);
+ kbasep_platform_device_term(kbdev);
+}
+
+int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+ if (err)
+ return err;
+
+ err = kbase_backend_timer_init(kbdev);
+ if (err)
+ goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+ dev_err(kbdev->dev, "Interrupt assigment check failed.\n");
+ err = -EINVAL;
+ goto fail_interrupt_test;
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ err = kbase_job_slot_init(kbdev);
+ if (err)
+ goto fail_job_slot;
+
+ init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+ return 0;
+
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ kbase_backend_timer_term(kbdev);
+fail_timer:
+ kbase_hwaccess_pm_halt(kbdev);
+
+ return err;
+}
+
+void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+ kbase_job_slot_halt(kbdev);
+ kbase_job_slot_term(kbdev);
+ kbase_backend_timer_term(kbdev);
+ kbase_hwaccess_pm_halt(kbdev);
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpuprops_backend.c
new file mode 100644
index 000000000000..b395325b556b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel property query backend APIs
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump)
+{
+ int i;
+
+ /* Fill regdump with the content of the relevant registers */
+ regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
+
+ regdump->l2_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_FEATURES), NULL);
+ regdump->suspend_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SUSPEND_SIZE), NULL);
+ regdump->tiler_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_FEATURES), NULL);
+ regdump->mem_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(MEM_FEATURES), NULL);
+ regdump->mmu_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(MMU_FEATURES), NULL);
+ regdump->as_present = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(AS_PRESENT), NULL);
+ regdump->js_present = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JS_PRESENT), NULL);
+
+ for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+ regdump->js_features[i] = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL);
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ regdump->texture_features[i] = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL);
+
+ regdump->thread_max_threads = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL);
+ regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE),
+ NULL);
+ regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL);
+ regdump->thread_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_FEATURES), NULL);
+
+ regdump->shader_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL);
+ regdump->shader_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL);
+
+ regdump->tiler_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_PRESENT_LO), NULL);
+ regdump->tiler_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_PRESENT_HI), NULL);
+
+ regdump->l2_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_PRESENT_LO), NULL);
+ regdump->l2_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_PRESENT_HI), NULL);
+
+ regdump->stack_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(STACK_PRESENT_LO), NULL);
+ regdump->stack_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(STACK_PRESENT_HI), NULL);
+}
+
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump)
+{
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ regdump->coherency_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+ } else {
+ /* Pre COHERENCY_FEATURES we only supported ACE_LITE */
+ regdump->coherency_features =
+ COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+ }
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_backend.c
new file mode 100644
index 000000000000..7ad309e8d7f4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_backend.c
@@ -0,0 +1,492 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * GPU backend instrumentation APIs.
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+
+/**
+ * kbasep_instr_hwcnt_cacheclean - Issue Cache Clean & Invalidate command to
+ * hardware
+ *
+ * @kbdev: Kbase device
+ */
+static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ unsigned long pm_flags;
+ u32 irq_mask;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_REQUEST_CLEAN);
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask | CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+ /* Clean and invalidate the caches so we're sure the MMU tables for the
+ * dump buffer are valid */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_uk_hwcnt_setup *setup)
+{
+ unsigned long flags, pm_flags;
+ int err = -EINVAL;
+ u32 irq_mask;
+ int ret;
+ u64 shader_cores_needed;
+ u32 prfcnt_config;
+
+ shader_cores_needed = kbase_pm_get_present_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+
+ /* The dump buffer must be non-NULL and 2048-byte aligned */
+ if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
+ goto out_err;
+
+ /* Override core availability policy to ensure all cores are available
+ */
+ kbase_pm_ca_instr_enable(kbdev);
+
+ /* Request the cores early on synchronously - we'll release them on any
+ * errors (e.g. instrumentation already active) */
+ kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is already enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out_unrequest_cores;
+ }
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
+ PRFCNT_SAMPLE_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+ /* In use, this context is the owner */
+ kbdev->hwcnt.kctx = kctx;
+ /* Remember the dump address so we can reprogram it later */
+ kbdev->hwcnt.addr = setup->dump_buffer;
+
+ /* Request the clean */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ kbdev->hwcnt.backend.triggered = 0;
+ /* Clean and invalidate the caches so we're sure the MMU tables for the
+ * dump buffer are valid */
+ ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+ &kbdev->hwcnt.backend.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Wait for cacheclean to complete */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_IDLE);
+
+ kbase_pm_request_l2_caches(kbdev);
+
+ /* Configure: counter dumps will use this context's address space */
+ prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ {
+ u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
+ >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
+
+ if (arch_v6)
+ prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+ }
+#endif
+
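+ /* Leave the counters in MODE_OFF while the dump buffer address and
+ * enable masks below are programmed; MODE_MANUAL is selected afterwards */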
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ setup->dump_buffer & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+ setup->dump_buffer >> 32, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+ setup->jm_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+ setup->shader_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
+ setup->mmu_l2_bm, kctx);
+ /* Due to PRLAM-8186 we need to disable the Tiler before we enable the
+ * HW counter dump. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0,
+ kctx);
+ else
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ setup->tiler_bm, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+
+ /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ setup->tiler_bm, kctx);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ err = 0;
+
+ dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+ return err;
+ out_unrequest_cores:
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ out_err:
+ return err;
+}
+
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
+{
+ unsigned long flags, pm_flags;
+ int err = -EINVAL;
+ u32 irq_mask;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ while (1) {
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is not enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* Instrumentation has been setup for another context */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
+ break;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Ongoing dump/setup - wait for its completion */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ }
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+ kbdev->hwcnt.backend.triggered = 0;
+
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
+
+ /* Disable the counters */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);
+
+ kbdev->hwcnt.kctx = NULL;
+ kbdev->hwcnt.addr = 0ULL;
+
+ kbase_pm_ca_instr_disable(kbdev);
+
+ kbase_pm_unrequest_cores(kbdev, true,
+ kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));
+
+ kbase_pm_release_l2_caches(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
+ kctx);
+
+ err = 0;
+
+ out:
+ return err;
+}
+
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* The instrumentation has been setup for another context */
+ goto unlock;
+ }
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
+ /* HW counters are disabled or another dump is ongoing, or we're
+ * resetting */
+ goto unlock;
+ }
+
+ kbdev->hwcnt.backend.triggered = 0;
+
+ /* Mark that we're dumping - the PF handler can signal that we faulted
+ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
+ /* Reconfigure the dump address */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ kbdev->hwcnt.addr & 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+ kbdev->hwcnt.addr >> 32, NULL);
+
+ /* Start dumping */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
+ kbdev->hwcnt.addr, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_PRFCNT_SAMPLE, kctx);
+
+ dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
+
+ err = 0;
+
+ unlock:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
+
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+ bool * const success)
+{
+ unsigned long flags;
+ bool complete = false;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) {
+ *success = true;
+ complete = true;
+ } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ *success = false;
+ complete = true;
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ return complete;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
+
+void kbasep_cache_clean_worker(struct work_struct *data)
+{
+ struct kbase_device *kbdev;
+ unsigned long flags;
+
+ kbdev = container_of(data, struct kbase_device,
+ hwcnt.backend.cache_clean_work);
+
+ mutex_lock(&kbdev->cacheclean_lock);
+ kbasep_instr_hwcnt_cacheclean(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Wait for our condition, and any reset to complete */
+ while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.cache_clean_wait,
+ kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_CLEANING);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_CLEANED);
+
+ /* All finished and idle */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ mutex_unlock(&kbdev->cacheclean_lock);
+}
+
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+ } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
+ int ret;
+ /* Always clean and invalidate the cache after a successful dump
+ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+ &kbdev->hwcnt.backend.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+void kbase_clean_caches_done(struct kbase_device *kbdev)
+{
+ u32 irq_mask;
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+ unsigned long flags;
+ unsigned long pm_flags;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+ /* Wakeup... */
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
+ /* Only wake if we weren't resetting */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
+ wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ }
+}
+
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ unsigned long flags;
+ int err;
+
+ /* Wait for dump & cacheclean to complete */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ err = -EINVAL;
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ } else {
+ /* Dump done */
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_IDLE);
+ err = 0;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ return err;
+}
+
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ /* Check it's the context previously set up and we're not already
+ * dumping */
+ if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_IDLE)
+ goto out;
+
+ /* Clear the counters */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_PRFCNT_CLEAR, kctx);
+
+ err = 0;
+
+out:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+
+int kbase_instr_backend_init(struct kbase_device *kbdev)
+{
+ int ret = 0;
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+
+ init_waitqueue_head(&kbdev->hwcnt.backend.wait);
+ init_waitqueue_head(&kbdev->hwcnt.backend.cache_clean_wait);
+ INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
+ kbasep_cache_clean_worker);
+ kbdev->hwcnt.backend.triggered = 0;
+
+ kbdev->hwcnt.backend.cache_clean_wq =
+ alloc_workqueue("Mali cache cleaning workqueue", 0, 1);
+ if (NULL == kbdev->hwcnt.backend.cache_clean_wq)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+void kbase_instr_backend_term(struct kbase_device *kbdev)
+{
+ destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_defs.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_defs.h
new file mode 100644
index 000000000000..4794672da8f0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_defs.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend-specific instrumentation definitions
+ */
+
+#ifndef _KBASE_INSTR_DEFS_H_
+#define _KBASE_INSTR_DEFS_H_
+
+/*
+ * Instrumentation State Machine States
+ */
+enum kbase_instr_state {
+ /* State where instrumentation is not active */
+ KBASE_INSTR_STATE_DISABLED = 0,
+ /* State machine is active and ready for a command. */
+ KBASE_INSTR_STATE_IDLE,
+ /* Hardware is currently dumping a frame. */
+ KBASE_INSTR_STATE_DUMPING,
+ /* We've requested a clean to occur on a workqueue */
+ KBASE_INSTR_STATE_REQUEST_CLEAN,
+ /* Hardware is currently cleaning and invalidating caches. */
+ KBASE_INSTR_STATE_CLEANING,
+ /* Cache clean completed, and either a) a dump is complete, or
+ * b) instrumentation can now be set up. */
+ KBASE_INSTR_STATE_CLEANED,
+ /* An error has occurred during DUMPING (page fault). */
+ KBASE_INSTR_STATE_FAULT
+};
+
+/* Structure used for instrumentation and HW counters dumping */
+struct kbase_instr_backend {
+ wait_queue_head_t wait;
+ int triggered;
+
+ enum kbase_instr_state state;
+ wait_queue_head_t cache_clean_wait;
+ struct workqueue_struct *cache_clean_wq;
+ struct work_struct cache_clean_work;
+};
+
+#endif /* _KBASE_INSTR_DEFS_H_ */
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_internal.h
new file mode 100644
index 000000000000..e96aeae786e1
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_instr_internal.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Backend-specific HW access instrumentation APIs
+ */
+
+#ifndef _KBASE_INSTR_INTERNAL_H_
+#define _KBASE_INSTR_INTERNAL_H_
+
+/**
+ * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
+ * @data: a &struct work_struct
+ */
+void kbasep_cache_clean_worker(struct work_struct *data);
+
+/**
+ * kbase_clean_caches_done() - Cache clean interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_clean_caches_done(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+#endif /* _KBASE_INSTR_INTERNAL_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_internal.h
new file mode 100644
index 000000000000..8781561e73d0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_internal.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend specific IRQ APIs
+ */
+
+#ifndef _KBASE_IRQ_INTERNAL_H_
+#define _KBASE_IRQ_INTERNAL_H_
+
+int kbase_install_interrupts(struct kbase_device *kbdev);
+
+void kbase_release_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed
+ * execution
+ * @kbdev: The kbase device
+ */
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+
+int kbasep_common_test_interrupt_handlers(
+ struct kbase_device * const kbdev);
+
+#endif /* _KBASE_IRQ_INTERNAL_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_linux.c
new file mode 100644
index 000000000000..8416b80e8b77
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_irq_linux.c
@@ -0,0 +1,469 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+
+#include <linux/interrupt.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG 0
+#define MMU_IRQ_TAG 1
+#define GPU_IRQ_TAG 2
+
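+/* The IRQ tag is encoded in the two low bits of the kbase_device pointer used
+ * as the request_irq() cookie; kbase_untag() masks it off again so the shared
+ * handlers can recover the device pointer. */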
+static void *kbase_tag(void *ptr, u32 tag)
+{
+ return (void *)(((uintptr_t) ptr) | tag);
+}
+
+static void *kbase_untag(void *ptr)
+{
+ return (void *)(((uintptr_t) ptr) & ~3);
+}
+
+static irqreturn_t kbase_job_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_job_done(kbdev, val);
+
+ return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
+
+static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ atomic_inc(&kbdev->faults_pending);
+
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val) {
+ atomic_dec(&kbdev->faults_pending);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_mmu_interrupt(kbdev, val);
+
+ atomic_dec(&kbdev->faults_pending);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_gpu_interrupt(kbdev, val);
+
+ return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
+
+static irq_handler_t kbase_handler_table[] = {
+ [JOB_IRQ_TAG] = kbase_job_irq_handler,
+ [MMU_IRQ_TAG] = kbase_mmu_irq_handler,
+ [GPU_IRQ_TAG] = kbase_gpu_irq_handler,
+};
+
+#ifdef CONFIG_MALI_DEBUG
+#define JOB_IRQ_HANDLER JOB_IRQ_TAG
+#define MMU_IRQ_HANDLER MMU_IRQ_TAG
+#define GPU_IRQ_HANDLER GPU_IRQ_TAG
+
+/**
+ * kbase_set_custom_irq_handler - Set a custom IRQ handler
+ * @kbdev: Device for which the handler is to be registered
+ * @custom_handler: Handler to be registered
+ * @irq_type: Interrupt type
+ *
+ * Registers the given interrupt handler for the requested interrupt type.
+ * If no custom handler is specified, the default handler is registered.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+ irq_handler_t custom_handler,
+ int irq_type)
+{
+ int result = 0;
+ irq_handler_t requested_irq_handler = NULL;
+
+ KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
+ (GPU_IRQ_HANDLER >= irq_type));
+
+ /* Release previous handler */
+ if (kbdev->irqs[irq_type].irq)
+ free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
+
+ requested_irq_handler = (NULL != custom_handler) ? custom_handler :
+ kbase_handler_table[irq_type];
+
+ if (0 != request_irq(kbdev->irqs[irq_type].irq,
+ requested_irq_handler,
+ kbdev->irqs[irq_type].flags | IRQF_SHARED,
+ dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+ result = -EINVAL;
+ dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ kbdev->irqs[irq_type].irq, irq_type);
+#ifdef CONFIG_SPARSE_IRQ
+ dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+ }
+
+ return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
+
+/* Test correct interrupt assignment and reception by the CPU */
+struct kbasep_irq_test {
+ struct hrtimer timer;
+ wait_queue_head_t wait;
+ int triggered;
+ u32 timeout;
+};
+
+static struct kbasep_irq_test kbasep_irq_test_data;
+
+#define IRQ_TEST_TIMEOUT 500
+
+static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
+{
+ struct kbasep_irq_test *test_data = container_of(timer,
+ struct kbasep_irq_test, timer);
+
+ test_data->timeout = 1;
+ test_data->triggered = 1;
+ wake_up(&test_data->wait);
+ return HRTIMER_NORESTART;
+}
+
+static int kbasep_common_test_interrupt(
+ struct kbase_device * const kbdev, u32 tag)
+{
+ int err = 0;
+ irq_handler_t test_handler;
+
+ u32 old_mask_val;
+ u16 mask_offset;
+ u16 rawstat_offset;
+
+ switch (tag) {
+ case JOB_IRQ_TAG:
+ test_handler = kbase_job_irq_test_handler;
+ rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
+ mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
+ break;
+ case MMU_IRQ_TAG:
+ test_handler = kbase_mmu_irq_test_handler;
+ rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
+ mask_offset = MMU_REG(MMU_IRQ_MASK);
+ break;
+ case GPU_IRQ_TAG:
+ /* already tested by pm_driver - bail out */
+ default:
+ return 0;
+ }
+
+ /* store old mask */
+ old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
+ /* mask interrupts */
+ kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+
+ if (kbdev->irqs[tag].irq) {
+ /* release original handler and install test handler */
+ if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
+ err = -EINVAL;
+ } else {
+ kbasep_irq_test_data.timeout = 0;
+ hrtimer_init(&kbasep_irq_test_data.timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kbasep_irq_test_data.timer.function =
+ kbasep_test_interrupt_timeout;
+
+ /* trigger interrupt */
+ kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
+ kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);
+
+ hrtimer_start(&kbasep_irq_test_data.timer,
+ HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ wait_event(kbasep_irq_test_data.wait,
+ kbasep_irq_test_data.triggered != 0);
+
+ if (kbasep_irq_test_data.timeout != 0) {
+ dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
+ kbdev->irqs[tag].irq, tag);
+ err = -EINVAL;
+ } else {
+ dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
+ kbdev->irqs[tag].irq, tag);
+ }
+
+ hrtimer_cancel(&kbasep_irq_test_data.timer);
+ kbasep_irq_test_data.triggered = 0;
+
+ /* mask interrupts */
+ kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+
+ /* release test handler */
+ free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
+ }
+
+ /* restore original interrupt */
+ if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
+ kbdev->irqs[tag].flags | IRQF_SHARED,
+ dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
+ dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
+ kbdev->irqs[tag].irq, tag);
+ err = -EINVAL;
+ }
+ }
+ /* restore old mask */
+ kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);
+
+ return err;
+}
+
+int kbasep_common_test_interrupt_handlers(
+ struct kbase_device * const kbdev)
+{
+ int err;
+
+ init_waitqueue_head(&kbasep_irq_test_data.wait);
+ kbasep_irq_test_data.triggered = 0;
+
+ /* A suspend won't happen during startup/insmod */
+ kbase_pm_context_active(kbdev);
+
+ err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
+ if (err) {
+ dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
+ goto out;
+ }
+
+ err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
+ if (err) {
+ dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
+ goto out;
+ }
+
+ dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
+
+ out:
+ kbase_pm_context_idle(kbdev);
+
+ return err;
+}
+#endif /* CONFIG_MALI_DEBUG */
+
+int kbase_install_interrupts(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ int err;
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
+ kbdev->irqs[i].flags | IRQF_SHARED,
+ dev_name(kbdev->dev),
+ kbase_tag(kbdev, i));
+ if (err) {
+ dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ kbdev->irqs[i].irq, i);
+#ifdef CONFIG_SPARSE_IRQ
+ dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+ goto release;
+ }
+ }
+
+ return 0;
+
+ release:
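+ /* Free the IRQs that were successfully requested before the failure */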
+ while (i-- > 0)
+ free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+
+ return err;
+}
+
+void kbase_release_interrupts(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ if (kbdev->irqs[i].irq)
+ free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+ }
+}
+
+void kbase_synchronize_irqs(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ if (kbdev->irqs[i].irq)
+ synchronize_irq(kbdev->irqs[i].irq);
+ }
+}
+
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_as.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_as.c
new file mode 100644
index 000000000000..c660c80341f4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_as.c
@@ -0,0 +1,235 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register backend context / address space management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+
+/**
+ * assign_and_activate_kctx_addr_space - Assign an AS to a context
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @current_as: Address Space to assign
+ *
+ * Assign an Address Space (AS) to a context, and add the context to the Policy.
+ *
+ * This includes:
+ * - setting up the global runpool_irq structure and the context on the AS,
+ * - activating the MMU on the AS,
+ * - allowing jobs to be submitted on the AS.
+ *
+ * Context:
+ * kbasep_js_kctx_info.jsctx_mutex held,
+ * kbasep_js_device_data.runpool_mutex held,
+ * AS transaction mutex held,
+ * Runpool IRQ lock held
+ */
+static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_as *current_as)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Attribute handling */
+ kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
+
+ /* Allow it to run jobs */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ kbase_js_runpool_inc_context_count(kbdev, kctx);
+}
+
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ int i;
+
+ if (kbdev->hwaccess.active_kctx == kctx) {
+ /* Context is already active */
+ return true;
+ }
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ if (kbdev->as_to_kctx[i] == kctx) {
+ /* Context already has ASID - mark as active */
+ return true;
+ }
+ }
+
+ /* Context does not have address space assigned */
+ return false;
+}
+
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ int as_nr = kctx->as_nr;
+
+ if (as_nr == KBASEP_AS_NR_INVALID) {
+ WARN(1, "Attempting to release context without ASID\n");
+ return;
+ }
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (atomic_read(&kctx->refcount) != 1) {
+ WARN(1, "Attempting to release active ASID\n");
+ return;
+ }
+
+ kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
+
+ kbase_ctx_sched_release_ctx(kctx);
+ kbase_js_runpool_dec_context_count(kbdev, kctx);
+}
+
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+}
+
+int kbase_backend_find_and_release_free_address_space(
+ struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
+ int i;
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ struct kbasep_js_kctx_info *as_js_kctx_info;
+ struct kbase_context *as_kctx;
+
+ as_kctx = kbdev->as_to_kctx[i];
+ as_js_kctx_info = &as_kctx->jctx.sched_info;
+
+ /* Don't release privileged or active contexts, or contexts with
+ * jobs running.
+ * Note that a context will have at least 1 reference (which
+ * was previously taken by kbasep_js_schedule_ctx()) until
+ * descheduled.
+ */
+ if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
+ atomic_read(&as_kctx->refcount) == 1) {
+ if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
+ as_kctx)) {
+ WARN(1, "Failed to retain active context\n");
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return KBASEP_AS_NR_INVALID;
+ }
+
+ kbasep_js_clear_submit_allowed(js_devdata, as_kctx);
+
+ /* Drop and retake locks to take the jsctx_mutex on the
+ * context we're about to release without violating lock
+ * ordering
+ */
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+
+ /* Release context from address space */
+ mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
+
+ if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) {
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
+ as_kctx,
+ true);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+ return i;
+ }
+
+ /* Context was retained while locks were dropped,
+ * continue looking for free AS */
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return KBASEP_AS_NR_INVALID;
+}
+
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int as_nr)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_as *new_address_space = NULL;
+
+ js_devdata = &kbdev->js_data;
+
+ if (kbdev->hwaccess.active_kctx == kctx) {
+ WARN(1, "Context is already scheduled in\n");
+ return false;
+ }
+
+ new_address_space = &kbdev->as[as_nr];
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
+
+ if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+ /* We need to retain it to keep the corresponding address space
+ */
+ kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ }
+
+ return true;
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_defs.h
new file mode 100644
index 000000000000..08a7400e66d5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_defs.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific definitions
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_DEFS_H_
+#define _KBASE_HWACCESS_GPU_DEFS_H_
+
+/* SLOT_RB_SIZE must be < 256 */
+#define SLOT_RB_SIZE 2
+#define SLOT_RB_MASK (SLOT_RB_SIZE - 1)
+
+/**
+ * struct rb_entry - Ringbuffer entry
+ * @katom: Atom associated with this entry
+ */
+struct rb_entry {
+ struct kbase_jd_atom *katom;
+};
+
+/**
+ * struct slot_rb - Slot ringbuffer
+ * @entries: Ringbuffer entries
+ * @last_context: The last context to submit a job on this slot
+ * @read_idx: Current read index of buffer
+ * @write_idx: Current write index of buffer
+ * @job_chain_flag: Flag used to implement jobchain disambiguation
+ */
+struct slot_rb {
+ struct rb_entry entries[SLOT_RB_SIZE];
+
+ struct kbase_context *last_context;
+
+ u8 read_idx;
+ u8 write_idx;
+
+ u8 job_chain_flag;
+};
+
+/**
+ * struct kbase_backend_data - GPU backend specific data for HW access layer
+ * @slot_rb: Slot ringbuffers
+ * @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines
+ * whether slots 0/1 or slot 2 are currently being
+ * pulled from
+ * @scheduling_timer: The timer tick used for rescheduling jobs
+ * @timer_running: Is the timer running? The runpool_mutex must be
+ * held whilst modifying this.
+ * @suspend_timer: Is the timer suspended? Set when a suspend
+ * occurs and cleared on resume. The runpool_mutex
+ * must be held whilst modifying this.
+ * @reset_gpu: Set to a KBASE_RESET_xxx value (see comments)
+ * @reset_workq: Work queue for performing the reset
+ * @reset_work: Work item for performing the reset
+ * @reset_wait: Wait event signalled when the reset is complete
+ * @reset_timer: Timeout for soft-stops before the reset
+ * @timeouts_updated: Have timeout values just been updated?
+ *
+ * The hwaccess_lock (a spinlock) must be held when accessing this structure
+ */
+struct kbase_backend_data {
+ struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
+
+ bool rmu_workaround_flag;
+
+ struct hrtimer scheduling_timer;
+
+ bool timer_running;
+ bool suspend_timer;
+
+ atomic_t reset_gpu;
+
+/* The GPU reset isn't pending */
+#define KBASE_RESET_GPU_NOT_PENDING 0
+/* kbase_prepare_to_reset_gpu has been called */
+#define KBASE_RESET_GPU_PREPARED 1
+/* kbase_reset_gpu has been called - the reset will now definitely happen
+ * within the timeout period */
+#define KBASE_RESET_GPU_COMMITTED 2
+/* The GPU reset process is currently occurring (timeout has expired or
+ * kbasep_try_reset_gpu_early was called) */
+#define KBASE_RESET_GPU_HAPPENING 3
+/* Reset the GPU silently, used when resetting the GPU as part of normal
+ * behavior (e.g. when exiting protected mode). */
+#define KBASE_RESET_GPU_SILENT 4
+ struct workqueue_struct *reset_workq;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ struct hrtimer reset_timer;
+
+ bool timeouts_updated;
+};
+
+/**
+ * struct kbase_jd_atom_backend - GPU backend specific katom data
+ */
+struct kbase_jd_atom_backend {
+};
+
+/**
+ * struct kbase_context_backend - GPU backend specific context data
+ */
+struct kbase_context_backend {
+};
+
+#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_hw.c
new file mode 100644
index 000000000000..92c36d1e6bfe
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_hw.c
@@ -0,0 +1,1514 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel job manager APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_vinstr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_js_affinity.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+#define beenthere(kctx, f, a...) \
+ dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if KBASE_GPU_RESET_EN
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
+static void kbasep_reset_timeout_worker(struct work_struct *data);
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer);
+#endif /* KBASE_GPU_RESET_EN */
+
+static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
+ struct kbase_context *kctx)
+{
+ return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
+}
+
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js)
+{
+ struct kbase_context *kctx;
+ u32 cfg;
+ u64 jc_head = katom->jc;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ /* Command register must be available */
+ KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+	/* Affinity must not be violated */
+ kbase_js_debug_log_current_affinities(kbdev);
+ KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js,
+ katom->affinity));
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
+ jc_head & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
+ jc_head >> 32, kctx);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+ katom->affinity & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
+ katom->affinity >> 32, kctx);
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on
+ * start */
+ cfg = kctx->as_nr;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) &&
+ !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+ cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+ if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
+ cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
+ else
+ cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+
+ if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END) &&
+ !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+ cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
+ else
+ cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649))
+ cfg |= JS_CONFIG_START_MMU;
+
+ cfg |= JS_CONFIG_THREAD_PRI(8);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
+ cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
+
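+	/* On GPUs with jobchain disambiguation, alternate the job chain flag
+	 * for each atom submitted to this slot and record the choice in
+	 * atom_flags, so that a later soft/hard stop can issue the matching
+	 * *_STOP_0/*_STOP_1 command (see
+	 * kbasep_job_slot_soft_or_hard_stop_do_action()).
+	 */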
+ if (kbase_hw_has_feature(kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
+ cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
+ kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+ true;
+ } else {
+ katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN;
+ kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+ false;
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
+ katom->flush_id, kctx);
+
+ /* Write an approximate start timestamp.
+ * It's approximate because there might be a job in the HEAD register.
+ */
+ katom->start_timestamp = ktime_get();
+
+ /* GO ! */
+ dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx",
+ katom, kctx, js, jc_head, katom->affinity);
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
+ (u32) katom->affinity);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(
+ GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
+ kctx, kbase_jd_atom_id(kctx, katom));
+#endif
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(katom, jc_head,
+ katom->affinity, cfg);
+ KBASE_TLSTREAM_TL_RET_CTX_LPU(
+ kctx,
+ &kbdev->gpu_props.props.raw_props.js_features[
+ katom->slot_nr]);
+ KBASE_TLSTREAM_TL_RET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]);
+ KBASE_TLSTREAM_TL_RET_ATOM_LPU(
+ katom,
+ &kbdev->gpu_props.props.raw_props.js_features[js],
+ "ctx_nr,atom_nr");
+#ifdef CONFIG_GPU_TRACEPOINTS
+ if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
+ /* If this is the only job on the slot, trace it as starting */
+ char js_string[16];
+
+ trace_gpu_sched_switch(
+ kbasep_make_job_slot_string(js, js_string,
+ sizeof(js_string)),
+ ktime_to_ns(katom->start_timestamp),
+ (u32)katom->kctx->id, 0, katom->work_id);
+ kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
+ }
+#endif
+ kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+ JS_COMMAND_START, katom->kctx);
+}
+
+/**
+ * kbasep_job_slot_update_head_start_timestamp - Update timestamp
+ * @kbdev: kbase device
+ * @js: job slot
+ * @end_timestamp: timestamp
+ *
+ * Update the start_timestamp of the job currently in the HEAD, based on the
+ * fact that we got an IRQ for the previous set of completed jobs.
+ *
+ * The estimate also takes into account the time the job was submitted, to
+ * work out the best estimate (which might still result in an over-estimate of
+ * the calculated time spent)
+ */
+static void kbasep_job_slot_update_head_start_timestamp(
+ struct kbase_device *kbdev,
+ int js,
+ ktime_t end_timestamp)
+{
+ if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) {
+ struct kbase_jd_atom *katom;
+ ktime_t timestamp_diff;
+ /* The atom in the HEAD */
+ katom = kbase_gpu_inspect(kbdev, js, 0);
+
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ timestamp_diff = ktime_sub(end_timestamp,
+ katom->start_timestamp);
+ if (ktime_to_ns(timestamp_diff) >= 0) {
+ /* Only update the timestamp if it's a better estimate
+ * than what's currently stored. This is because our
+ * estimate that accounts for the throttle time may be
+ * too much of an overestimate */
+ katom->start_timestamp = end_timestamp;
+ }
+ }
+}
+
+/**
+ * kbasep_trace_tl_event_lpu_softstop - Call event_lpu_softstop timeline
+ * tracepoint
+ * @kbdev: kbase device
+ * @js: job slot
+ *
+ * Make a tracepoint call to the instrumentation module informing that
+ * softstop happened on given lpu (job slot).
+ */
+static void kbasep_trace_tl_event_lpu_softstop(struct kbase_device *kbdev,
+ int js)
+{
+ KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(
+ &kbdev->gpu_props.props.raw_props.js_features[js]);
+}
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done)
+{
+ unsigned long flags;
+ int i;
+ u32 count = 0;
+ ktime_t end_timestamp = ktime_get();
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
+
+ memset(&kbdev->slot_submit_count_irq[0], 0,
+ sizeof(kbdev->slot_submit_count_irq));
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ while (done) {
+ u32 failed = done >> 16;
+
+ /* treat failed slots as finished slots */
+ u32 finished = (done & 0xFFFF) | failed;
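+		/* In "done", bits 0..15 flag slots that have completed a job
+		 * and bits 16..31 flag slots that have failed one, so a failed
+		 * slot is also treated as finished here and the IRQ_CLEAR
+		 * write below clears both (1 << i) and (1 << (i + 16)).
+		 */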
+
+ /* Note: This is inherently unfair, as we always check
+ * for lower numbered interrupts before the higher
+			 * numbered ones. */
+ i = ffs(finished) - 1;
+ KBASE_DEBUG_ASSERT(i >= 0);
+
+ do {
+ int nr_done;
+ u32 active;
+ u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */
+ u64 job_tail = 0;
+
+ if (failed & (1u << i)) {
+ /* read out the job slot status code if the job
+ * slot reported failure */
+ completion_code = kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_STATUS), NULL);
+
+ switch (completion_code) {
+ case BASE_JD_EVENT_STOPPED:
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(
+ GATOR_MAKE_EVENT(
+ GATOR_JOB_SLOT_SOFT_STOPPED, i),
+ NULL, 0);
+#endif
+
+ kbasep_trace_tl_event_lpu_softstop(
+ kbdev, i);
+
+ /* Soft-stopped job - read the value of
+ * JS<n>_TAIL so that the job chain can
+ * be resumed */
+ job_tail = (u64)kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_TAIL_LO),
+ NULL) |
+ ((u64)kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_TAIL_HI),
+ NULL) << 32);
+ break;
+ case BASE_JD_EVENT_NOT_STARTED:
+ /* PRLAM-10673 can cause a TERMINATED
+ * job to come back as NOT_STARTED, but
+ * the error interrupt helps us detect
+ * it */
+ completion_code =
+ BASE_JD_EVENT_TERMINATED;
+ /* fall through */
+ default:
+ dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+ i, completion_code,
+ kbase_exception_name
+ (kbdev,
+ completion_code));
+ }
+
+ kbase_gpu_irq_evict(kbdev, i);
+ }
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+ done & ((1 << i) | (1 << (i + 16))),
+ NULL);
+ active = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_JS_STATE),
+ NULL);
+
+ if (((active >> i) & 1) == 0 &&
+ (((done >> (i + 16)) & 1) == 0)) {
+ /* There is a potential race we must work
+ * around:
+ *
+ * 1. A job slot has a job in both current and
+ * next registers
+ * 2. The job in current completes
+ * successfully, the IRQ handler reads
+ * RAWSTAT and calls this function with the
+ * relevant bit set in "done"
+ * 3. The job in the next registers becomes the
+ * current job on the GPU
+ * 4. Sometime before the JOB_IRQ_CLEAR line
+ * above the job on the GPU _fails_
+ * 5. The IRQ_CLEAR clears the done bit but not
+ * the failed bit. This atomically sets
+ * JOB_IRQ_JS_STATE. However since both jobs
+ * have now completed the relevant bits for
+ * the slot are set to 0.
+ *
+ * If we now did nothing then we'd incorrectly
+ * assume that _both_ jobs had completed
+ * successfully (since we haven't yet observed
+ * the fail bit being set in RAWSTAT).
+ *
+ * So at this point if there are no active jobs
+ * left we check to see if RAWSTAT has a failure
+ * bit set for the job slot. If it does we know
+ * that there has been a new failure that we
+ * didn't previously know about, so we make sure
+ * that we record this in active (but we wait
+ * for the next loop to deal with it).
+ *
+ * If we were handling a job failure (i.e. done
+ * has the relevant high bit set) then we know
+ * that the value read back from
+ * JOB_IRQ_JS_STATE is the correct number of
+ * remaining jobs because the failed job will
+				 * have prevented any further jobs from starting
+ * execution.
+ */
+ u32 rawstat = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if ((rawstat >> (i + 16)) & 1) {
+ /* There is a failed job that we've
+ * missed - add it back to active */
+ active |= (1u << i);
+ }
+ }
+
+ dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n",
+ completion_code);
+
+ nr_done = kbase_backend_nr_atoms_submitted(kbdev, i);
+ nr_done -= (active >> i) & 1;
+ nr_done -= (active >> (i + 16)) & 1;
+
+ if (nr_done <= 0) {
+ dev_warn(kbdev->dev, "Spurious interrupt on slot %d",
+ i);
+
+ goto spurious;
+ }
+
+ count += nr_done;
+
+ while (nr_done) {
+ if (nr_done == 1) {
+ kbase_gpu_complete_hw(kbdev, i,
+ completion_code,
+ job_tail,
+ &end_timestamp);
+ kbase_jm_try_kick_all(kbdev);
+ } else {
+ /* More than one job has completed.
+ * Since this is not the last job being
+ * reported this time it must have
+ * passed. This is because the hardware
+ * will not allow further jobs in a job
+ * slot to complete until the failed job
+ * is cleared from the IRQ status.
+ */
+ kbase_gpu_complete_hw(kbdev, i,
+ BASE_JD_EVENT_DONE,
+ 0,
+ &end_timestamp);
+ }
+ nr_done--;
+ }
+ spurious:
+ done = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
+ /* Workaround for missing interrupt caused by
+ * PRLAM-10883 */
+ if (((active >> i) & 1) && (0 ==
+ kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i,
+ JS_STATUS), NULL))) {
+ /* Force job slot to be processed again
+ */
+ done |= (1u << i);
+ }
+ }
+
+ failed = done >> 16;
+ finished = (done & 0xFFFF) | failed;
+ if (done)
+ end_timestamp = ktime_get();
+ } while (finished & (1 << i));
+
+ kbasep_job_slot_update_head_start_timestamp(kbdev, i,
+ end_timestamp);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+#if KBASE_GPU_RESET_EN
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_COMMITTED) {
+ /* If we're trying to reset the GPU then we might be able to do
+ * it early (without waiting for a timeout) because some jobs
+ * have completed
+ */
+ kbasep_try_reset_gpu_early(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
+}
+KBASE_EXPORT_TEST_API(kbase_job_done);
+
+static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ bool soft_stops_allowed = true;
+
+ if (kbase_jd_katom_is_protected(katom)) {
+ soft_stops_allowed = false;
+ } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+ if ((katom->core_req & BASE_JD_REQ_T) != 0)
+ soft_stops_allowed = false;
+ }
+ return soft_stops_allowed;
+}
+
+static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
+ base_jd_core_req core_reqs)
+{
+ bool hard_stops_allowed = true;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
+ if ((core_reqs & BASE_JD_REQ_T) != 0)
+ hard_stops_allowed = false;
+ }
+ return hard_stops_allowed;
+}
+
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ int js,
+ u32 action,
+ base_jd_core_req core_reqs,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_context *kctx = target_katom->kctx;
+#if KBASE_TRACE_ENABLE
+ u32 status_reg_before;
+ u64 job_in_head_before;
+ u32 status_reg_after;
+
+ KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
+
+ /* Check the head pointer */
+ job_in_head_before = ((u64) kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js, JS_HEAD_LO), NULL))
+ | (((u64) kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js, JS_HEAD_HI), NULL))
+ << 32);
+ status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
+ NULL);
+#endif
+
+ if (action == JS_COMMAND_SOFT_STOP) {
+ bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
+ target_katom);
+
+ if (!soft_stop_allowed) {
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kbdev->dev,
+ "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
+#endif /* CONFIG_MALI_DEBUG */
+ return;
+ }
+
+ /* We are about to issue a soft stop, so mark the atom as having
+ * been soft stopped */
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+
+ /* Mark the point where we issue the soft-stop command */
+ KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(target_katom);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ int i;
+
+ for (i = 0;
+ i < kbase_backend_nr_atoms_submitted(kbdev, js);
+ i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* For HW_ISSUE_8316, only 'bad' jobs attacking
+ * the system can cause this issue: normally,
+ * all memory should be allocated in multiples
+				 * of 4 pages, and growable memory should be
+				 * resized in multiples of 4 pages.
+ *
+ * Whilst such 'bad' jobs can be cleared by a
+ * GPU reset, the locking up of a uTLB entry
+ * caused by the bad job could also stall other
+ * ASs, meaning that other ASs' jobs don't
+ * complete in the 'grace' period before the
+ * reset. We don't want to lose other ASs' jobs
+ * when they would normally complete fine, so we
+ * must 'poke' the MMU regularly to help other
+ * ASs complete */
+ kbase_as_poking_timer_retain_atom(
+ kbdev, katom->kctx, katom);
+ }
+ }
+
+ if (kbase_hw_has_feature(
+ kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ action = (target_katom->atom_flags &
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_SOFT_STOP_1 :
+ JS_COMMAND_SOFT_STOP_0;
+ }
+ } else if (action == JS_COMMAND_HARD_STOP) {
+ bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
+ core_reqs);
+
+ if (!hard_stop_allowed) {
+ /* Jobs can be hard-stopped for the following reasons:
+ * * CFS decides the job has been running too long (and
+ * soft-stop has not occurred). In this case the GPU
+ * will be reset by CFS if the job remains on the
+ * GPU.
+ *
+ * * The context is destroyed, kbase_jd_zap_context
+ * will attempt to hard-stop the job. However it also
+ * has a watchdog which will cause the GPU to be
+ * reset if the job remains on the GPU.
+ *
+ * * An (unhandled) MMU fault occurred. As long as
+ * BASE_HW_ISSUE_8245 is defined then the GPU will be
+ * reset.
+ *
+ * All three cases result in the GPU being reset if the
+ * hard-stop fails, so it is safe to just return and
+ * ignore the hard-stop request.
+ */
+ dev_warn(kbdev->dev,
+ "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
+ return;
+ }
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
+
+ if (kbase_hw_has_feature(
+ kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ action = (target_katom->atom_flags &
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_HARD_STOP_1 :
+ JS_COMMAND_HARD_STOP_0;
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
+
+#if KBASE_TRACE_ENABLE
+ status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
+ NULL);
+ if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
+ struct kbase_jd_atom *head;
+ struct kbase_context *head_kctx;
+
+ head = kbase_gpu_inspect(kbdev, js, 0);
+ head_kctx = head->kctx;
+
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx,
+ head, job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ 0, js);
+
+ switch (action) {
+ case JS_COMMAND_SOFT_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx,
+ head, head->jc, js);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ } else {
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ 0, js);
+
+ switch (action) {
+ case JS_COMMAND_SOFT_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0,
+ js);
+ break;
+ case JS_COMMAND_SOFT_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_HARD_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0,
+ js);
+ break;
+ case JS_COMMAND_HARD_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_HARD_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL,
+ 0, js);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+#endif
+}
+
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* Cancel any remaining running jobs for this kctx */
+ mutex_lock(&kctx->jctx.lock);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Invalidate all jobs in context, to prevent re-submitting */
+ for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+ if (!work_pending(&kctx->jctx.atoms[i].work))
+ kctx->jctx.atoms[i].event_code =
+ BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_hardstop(kctx, i, NULL);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_device *kbdev;
+ int js = target_katom->slot_nr;
+ int priority = target_katom->sched_priority;
+ int i;
+ bool stop_sent = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+ if (!katom)
+ continue;
+
+ if (katom->kctx != kctx)
+ continue;
+
+ if (katom->sched_priority > priority) {
+ if (!stop_sent)
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE(
+ target_katom);
+
+ kbase_job_slot_softstop(kbdev, js, katom);
+ stop_sent = true;
+ }
+ }
+}
+
+struct zap_reset_data {
+ /* The stages are:
+ * 1. The timer has never been called
+ * 2. The zap has timed out, all slots are soft-stopped - the GPU reset
+ * will happen. The GPU has been reset when
+	 *    kbdev->hwaccess.backend.reset_wait is signalled
+ *
+ * (-1 - The timer has been cancelled)
+ */
+ int stage;
+ struct kbase_device *kbdev;
+ struct hrtimer timer;
+ spinlock_t lock; /* protects updates to stage member */
+};
+
+static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
+{
+ struct zap_reset_data *reset_data = container_of(timer,
+ struct zap_reset_data, timer);
+ struct kbase_device *kbdev = reset_data->kbdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&reset_data->lock, flags);
+
+ if (reset_data->stage == -1)
+ goto out;
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_prepare_to_reset_gpu(kbdev)) {
+		dev_err(kbdev->dev, "Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+ ZAP_TIMEOUT);
+ kbase_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ reset_data->stage = 2;
+
+ out:
+ spin_unlock_irqrestore(&reset_data->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct zap_reset_data reset_data;
+ unsigned long flags;
+
+ hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ reset_data.timer.function = zap_timeout_callback;
+
+ spin_lock_init(&reset_data.lock);
+
+ reset_data.kbdev = kbdev;
+ reset_data.stage = 1;
+
+ hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for all jobs to finish, and for the context to be not-scheduled
+	 * (due to kbase_job_zap_context(), we also guarantee it's not in the JS
+	 * policy queue either) */
+ wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0);
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+ !kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ spin_lock_irqsave(&reset_data.lock, flags);
+ if (reset_data.stage == 1) {
+ /* The timer hasn't run yet - so cancel it */
+ reset_data.stage = -1;
+ }
+ spin_unlock_irqrestore(&reset_data.lock, flags);
+
+ hrtimer_cancel(&reset_data.timer);
+
+ if (reset_data.stage == 2) {
+ /* The reset has already started.
+ * Wait for the reset to complete
+ */
+ wait_event(kbdev->hwaccess.backend.reset_wait,
+ atomic_read(&kbdev->hwaccess.backend.reset_gpu)
+ == KBASE_RESET_GPU_NOT_PENDING);
+ }
+ destroy_hrtimer_on_stack(&reset_data.timer);
+
+ dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
+
+ /* Ensure that the signallers of the waitqs have finished */
+ mutex_lock(&kctx->jctx.lock);
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
+{
+ u32 flush_id = 0;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
+ mutex_lock(&kbdev->pm.lock);
+ if (kbdev->pm.backend.gpu_powered)
+ flush_id = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(LATEST_FLUSH), NULL);
+ mutex_unlock(&kbdev->pm.lock);
+ }
+
+ return flush_id;
+}
+
+int kbase_job_slot_init(struct kbase_device *kbdev)
+{
+#if KBASE_GPU_RESET_EN
+ kbdev->hwaccess.backend.reset_workq = alloc_workqueue(
+ "Mali reset workqueue", 0, 1);
+ if (NULL == kbdev->hwaccess.backend.reset_workq)
+ return -EINVAL;
+
+ KBASE_DEBUG_ASSERT(0 ==
+ object_is_on_stack(&kbdev->hwaccess.backend.reset_work));
+ INIT_WORK(&kbdev->hwaccess.backend.reset_work,
+ kbasep_reset_timeout_worker);
+
+ hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ kbdev->hwaccess.backend.reset_timer.function =
+ kbasep_reset_timer_callback;
+#endif
+
+ return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_init);
+
+void kbase_job_slot_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_job_slot_term(struct kbase_device *kbdev)
+{
+#if KBASE_GPU_RESET_EN
+ destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
+#endif
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_term);
+
+#if KBASE_GPU_RESET_EN
+/**
+ * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot
+ * @kbdev: kbase device pointer
+ * @kctx: context to check against
+ * @js: slot to check
+ * @target_katom: An atom to check, or NULL if all atoms from @kctx on
+ * slot @js should be checked
+ *
+ * These checks are based upon the parameters that would normally be passed to
+ * kbase_job_slot_hardstop().
+ *
+ * In the event of @target_katom being NULL, this will check the last jobs that
+ * are likely to be running on the slot to see whether a) they belong to @kctx
+ * (and so would be stopped) and b) they use AFBC.
+ *
+ * In that case, it's guaranteed that a job currently executing on the HW with
+ * AFBC will be detected. However, this is a conservative check because it also
+ * detects jobs that have just completed.
+ *
+ * Return: true when a hard-stop _might_ stop an AFBC atom, else false.
+ */
+static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ bool ret = false;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* When we have an atom the decision can be made straight away. */
+ if (target_katom)
+ return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
+
+	/* Otherwise, we must check the hardware to see if it has atoms from
+ * this context with AFBC. */
+ for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+ if (!katom)
+ continue;
+
+ /* Ignore atoms from other contexts, they won't be stopped when
+ * we use this for checking if we should hard-stop them */
+ if (katom->kctx != kctx)
+ continue;
+
+ /* An atom on this slot and this context: check for AFBC */
+ if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif /* KBASE_GPU_RESET_EN */
+
+/**
+ * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * @sw_flags: Flags to pass in about the soft-stop
+ *
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+ KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+ kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
+ JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
+/**
+ * kbase_job_slot_softstop - Soft-stop the specified job slot
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
+}
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx: The kbase context that contains the job(s) that should
+ * be hard-stopped
+ * @js: The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ * jobs from the context)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ bool stopped;
+#if KBASE_GPU_RESET_EN
+ /* We make the check for AFBC before evicting/stopping atoms. Note
+ * that no other thread can modify the slots whilst we have the
+ * hwaccess_lock. */
+ int needs_workaround_for_afbc =
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
+ && kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
+ target_katom);
+#endif
+
+ stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
+ target_katom,
+ JS_COMMAND_HARD_STOP);
+#if KBASE_GPU_RESET_EN
+ if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
+ kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
+ needs_workaround_for_afbc)) {
+ /* MIDBASE-2916 if a fragment job with AFBC encoding is
+ * hardstopped, ensure to do a soft reset also in order to
+ * clear the GPU status.
+		 * The workaround for HW issue 8401 has an issue, so after
+		 * hard-stopping just reset the GPU. This will ensure that the
+		 * jobs leave the GPU. */
+ if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+			dev_err(kbdev->dev, "Issuing GPU soft-reset after hard stopping due to hardware issue");
+ kbase_reset_gpu_locked(kbdev);
+ }
+ }
+#endif
+}
+
+/**
+ * kbase_job_check_enter_disjoint - potentially enter disjoint state
+ * @kbdev: kbase device
+ * @action: the event which has occurred
+ * @core_reqs: core requirements of the atom
+ * @target_katom: the atom which is being affected
+ *
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period
+ *
+ * @core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+ base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+{
+ u32 hw_action = action & JS_COMMAND_MASK;
+
+ /* For hard-stop, don't enter if hard-stop not allowed */
+ if (hw_action == JS_COMMAND_HARD_STOP &&
+ !kbasep_hard_stop_allowed(kbdev, core_reqs))
+ return;
+
+ /* For soft-stop, don't enter if soft-stop not allowed, or isn't
+ * causing disjoint */
+ if (hw_action == JS_COMMAND_SOFT_STOP &&
+ !(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+ (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+ return;
+
+ /* Nothing to do if already logged disjoint state on this atom */
+ if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+ return;
+
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+ kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom)
+{
+ if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
+ target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
+ kbase_disjoint_state_down(kbdev);
+ }
+}
+
+
+#if KBASE_GPU_RESET_EN
+static void kbase_debug_dump_registers(struct kbase_device *kbdev)
+{
+ int i;
+
+ kbase_io_history_dump(kbdev);
+
+ dev_err(kbdev->dev, "Register state:");
+ dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL));
+ dev_err(kbdev->dev, " JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x",
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL));
+ for (i = 0; i < 3; i++) {
+ dev_err(kbdev->dev, " JS%d_STATUS=0x%08x JS%d_HEAD_LO=0x%08x",
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS),
+ NULL),
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO),
+ NULL));
+ }
+ dev_err(kbdev->dev, " MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL));
+ dev_err(kbdev->dev, " GPU_IRQ_MASK=0x%08x JOB_IRQ_MASK=0x%08x MMU_IRQ_MASK=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL),
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL));
+ dev_err(kbdev->dev, " PWR_OVERRIDE0=0x%08x PWR_OVERRIDE1=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL));
+ dev_err(kbdev->dev, " SHADER_CONFIG=0x%08x L2_MMU_CONFIG=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
+ dev_err(kbdev->dev, " TILER_CONFIG=0x%08x JM_CONFIG=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG), NULL));
+}
+
+static void kbasep_reset_timeout_worker(struct work_struct *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ ktime_t end_timestamp = ktime_get();
+ struct kbasep_js_device_data *js_devdata;
+ bool try_schedule = false;
+ bool silent = false;
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+ KBASE_DEBUG_ASSERT(data);
+
+ kbdev = container_of(data, struct kbase_device,
+ hwaccess.backend.reset_work);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_SILENT)
+ silent = true;
+
+ KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
+
+ /* Suspend vinstr.
+ * This call will block until vinstr is suspended. */
+ kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+ /* Make sure the timer has completed - this cannot be done from
+ * interrupt context, so this cannot be done within
+ * kbasep_try_reset_gpu_early. */
+ hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+
+ if (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ /* This would re-activate the GPU. Since it's already idle,
+ * there's no need to reset it */
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING);
+ kbase_disjoint_state_down(kbdev);
+ wake_up(&kbdev->hwaccess.backend.reset_wait);
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ spin_lock(&kbdev->hwaccess_lock);
+ spin_lock(&kbdev->mmu_mask_change);
+	/* We're about to flush out the IRQs and their bottom halves */
+ kbdev->irq_reset_flush = true;
+
+ /* Disable IRQ to avoid IRQ handlers to kick in after releasing the
+ * spinlock; this also clears any outstanding interrupts */
+ kbase_pm_disable_interrupts_nolock(kbdev);
+
+ spin_unlock(&kbdev->mmu_mask_change);
+ spin_unlock(&kbdev->hwaccess_lock);
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	/* Ensure that any IRQ handlers have finished.
+	 * This must be done without holding any locks that the IRQ handlers
+	 * will take */
+ kbase_synchronize_irqs(kbdev);
+
+ /* Flush out any in-flight work items */
+ kbase_flush_mmu_wqs(kbdev);
+
+ /* The flush has completed so reset the active indicator */
+ kbdev->irq_reset_flush = false;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
+ /* Ensure that L2 is not transitioning when we send the reset
+ * command */
+ while (--max_loops && kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_L2))
+ ;
+
+ WARN(!max_loops, "L2 power transition timed out while trying to reset\n");
+ }
+
+ mutex_lock(&kbdev->pm.lock);
+ /* We hold the pm lock, so there ought to be a current policy */
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy);
+
+	/* All slots have been soft-stopped and we've waited
+	 * SOFT_STOP_RESET_TIMEOUT for the slots to clear; at this point we
+	 * assume that anything still left on the GPU is stuck there and
+	 * we'll kill it when we reset the GPU */
+
+ if (!silent)
+ dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+ RESET_TIMEOUT);
+
+ /* Output the state of some interesting registers to help in the
+ * debugging of GPU resets */
+ if (!silent)
+ kbase_debug_dump_registers(kbdev);
+
+ /* Complete any jobs that were still on the GPU */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->protected_mode = false;
+ kbase_backend_reset(kbdev, &end_timestamp);
+ kbase_pm_metrics_update(kbdev, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* Reset the GPU */
+ kbase_pm_init_hw(kbdev, 0);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_restore_all_as(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_pm_enable_interrupts(kbdev);
+
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING);
+
+ kbase_disjoint_state_down(kbdev);
+
+ wake_up(&kbdev->hwaccess.backend.reset_wait);
+ if (!silent)
+ dev_err(kbdev->dev, "Reset complete");
+
+ if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending)
+ try_schedule = true;
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Find out what cores are required now */
+ kbase_pm_update_cores_state(kbdev);
+
+ /* Synchronously request and wait for those cores, because if
+ * instrumentation is enabled it would need them immediately. */
+ kbase_pm_check_transitions_sync(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Try submitting some jobs to restart processing */
+ if (try_schedule) {
+ KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u,
+ 0);
+ kbase_js_sched_all(kbdev);
+ }
+
+ /* Process any pending slot updates */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_backend_slot_update(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_context_idle(kbdev);
+
+ /* Release vinstr */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
+}
+
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
+{
+ struct kbase_device *kbdev = container_of(timer, struct kbase_device,
+ hwaccess.backend.reset_timer);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Reset still pending? */
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) ==
+ KBASE_RESET_GPU_COMMITTED)
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * If all jobs are evicted from the GPU then we can reset the GPU
+ * immediately instead of waiting for the timeout to elapse
+ */
+
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
+{
+ int i;
+ int pending_jobs = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Count the number of jobs */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i);
+
+ if (pending_jobs > 0) {
+ /* There are still jobs on the GPU - wait */
+ return;
+ }
+
+	/* To avoid reading incorrect register values while dumping a failed
+	 * job, skip the early reset.
+ */
+ if (kbdev->job_fault_debug != false)
+ return;
+
+ /* Check that the reset has been committed to (i.e. kbase_reset_gpu has
+ * been called), and that no other thread beat this thread to starting
+ * the reset */
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
+ KBASE_RESET_GPU_COMMITTED) {
+ /* Reset has already occurred */
+ return;
+ }
+
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+}
+
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_try_reset_gpu_early_locked(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
+ * @kbdev: kbase device
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return:
+ * The function returns a boolean which should be interpreted as follows:
+ * true - Prepared for reset, kbase_reset_gpu_locked should be called.
+ * false - Another thread is performing a reset, kbase_reset_gpu_locked should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
+{
+ int i;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING,
+ KBASE_RESET_GPU_PREPARED) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* Some other thread is already resetting the GPU */
+ return false;
+ }
+
+ kbase_disjoint_state_up(kbdev);
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_softstop(kbdev, i, NULL);
+
+ return true;
+}
+
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu);
+
+/*
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for
+ * kbdev->hwaccess.backend.reset_wait to be signalled to know when the reset
+ * has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for
+	 * a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+ kbdev->reset_timeout_ms);
+
+ hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+ HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+ HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu);
+
+void kbase_reset_gpu_locked(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for
+	 * a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+ kbdev->reset_timeout_ms);
+ hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+ HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+ HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early_locked(kbdev);
+}
+
+void kbase_reset_gpu_silent(struct kbase_device *kbdev)
+{
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING,
+ KBASE_RESET_GPU_SILENT) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* Some other thread is already resetting the GPU */
+ return;
+ }
+
+ kbase_disjoint_state_up(kbdev);
+
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+}
+
+bool kbase_reset_gpu_active(struct kbase_device *kbdev)
+{
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_NOT_PENDING)
+ return false;
+
+ return true;
+}
+#endif /* KBASE_GPU_RESET_EN */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_internal.h
new file mode 100644
index 000000000000..1f382b3c1af4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_internal.h
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Job Manager backend-specific low-level APIs.
+ */
+
+#ifndef _KBASE_JM_HWACCESS_H_
+#define _KBASE_JM_HWACCESS_H_
+
+#include <mali_kbase_hw.h>
+#include <mali_kbase_debug.h>
+#include <linux/atomic.h>
+
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/**
+ * kbase_job_submit_nolock() - Submit a job to a certain job-slot
+ * @kbdev: Device pointer
+ * @katom: Atom to submit
+ * @js: Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_submit_nolock(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, int js);
+
+/**
+ * kbase_job_done_slot() - Complete the head job on a particular job-slot
+ * @kbdev: Device pointer
+ * @s: Job slot
+ * @completion_code: Completion code of job reported by GPU
+ * @job_tail: Job tail address reported by GPU
+ * @end_timestamp: Timestamp of job completion
+ */
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
+ u64 job_tail, ktime_t *end_timestamp);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+static inline char *kbasep_make_job_slot_string(int js, char *js_string,
+ size_t js_size)
+{
+ snprintf(js_string, js_size, "job_slot_%i", js);
+ return js_string;
+}
+#endif
+
+/**
+ * kbase_job_hw_submit() - Submit a job to the GPU
+ * @kbdev: Device pointer
+ * @katom: Atom to submit
+ * @js: Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js);
+
+/**
+ * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
+ * on the specified atom
+ * @kbdev: Device pointer
+ * @js: Job slot to stop on
+ * @action: The action to perform, either JSn_COMMAND_HARD_STOP or
+ * JSn_COMMAND_SOFT_STOP
+ * @core_reqs: Core requirements of atom to stop
+ * @target_katom: Atom to stop
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ int js,
+ u32 action,
+ base_jd_core_req core_reqs,
+ struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
+ * slot belonging to a given context.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer. May be NULL
+ * @js:		Job slot to hard stop
+ * @katom:	Specific atom to stop. May be NULL
+ * @action: The action to perform, either JSn_COMMAND_HARD_STOP or
+ * JSn_COMMAND_SOFT_STOP
+ *
+ * If no context is provided then all jobs on the slot will be soft or hard
+ * stopped.
+ *
+ * If a katom is provided then only that specific atom will be stopped. In this
+ * case the kctx parameter is ignored.
+ *
+ * Jobs that are on the slot but are not yet on the GPU will be unpulled and
+ * returned to the job scheduler.
+ *
+ * Return: true if an atom was stopped, false otherwise
+ */
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action);
+
+/**
+ * kbase_job_slot_init - Initialise job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_job_slot_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_halt - Halt the job slot framework
+ * @kbdev: Device pointer
+ *
+ * Should prevent any further job slot processing
+ */
+void kbase_job_slot_halt(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_term - Terminate job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver termination
+ */
+void kbase_job_slot_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_cacheclean - Cause a GPU cache clean & flush
+ * @kbdev: Device pointer
+ *
+ * Caller must not be in IRQ context
+ */
+void kbase_gpu_cacheclean(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JM_HWACCESS_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.c
new file mode 100644
index 000000000000..a41e7b5b7afb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.c
@@ -0,0 +1,1947 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_10969_workaround.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_affinity.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/* Return whether the specified ringbuffer is empty. HW access lock must be
+ * held */
+#define SLOT_RB_EMPTY(rb) (rb->write_idx == rb->read_idx)
+/* Return number of atoms currently in the specified ringbuffer. HW access lock
+ * must be held */
+#define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx)
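+/* The ring is indexed by free-running read/write indices masked with
+ * SLOT_RB_MASK; the (s8) cast above assumes the indices wrap at 8 bits, so
+ * the difference stays correct provided no more than SLOT_RB_SIZE atoms are
+ * ever queued on a slot (guarded by the WARN_ON in kbase_gpu_enqueue_atom()).
+ */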
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer
+ * @kbdev: Device pointer
+ * @katom: Atom to enqueue
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
+
+ WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom;
+ rb->write_idx++;
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+}
+
+/**
+ * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once
+ * it has been completed
+ * @kbdev: Device pointer
+ * @js: Job slot to remove atom from
+ * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in
+ * which case current time will be used.
+ *
+ * Context: Caller must hold the HW access lock
+ *
+ * Return: Atom removed from ringbuffer
+ */
+static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
+ int js,
+ ktime_t *end_timestamp)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+ struct kbase_jd_atom *katom;
+
+ if (SLOT_RB_EMPTY(rb)) {
+ WARN(1, "GPU ringbuffer unexpectedly empty\n");
+ return NULL;
+ }
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom;
+
+ kbase_gpu_release_atom(kbdev, katom, end_timestamp);
+
+ rb->read_idx++;
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
+
+ kbase_js_debug_log_current_affinities(kbdev);
+
+ return katom;
+}
+
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+ int idx)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if ((SLOT_RB_ENTRIES(rb) - 1) < idx)
+ return NULL; /* idx out of range */
+
+ return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
+ int js)
+{
+ return kbase_gpu_inspect(kbdev, js, 0);
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+ int js)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+ if (SLOT_RB_EMPTY(rb))
+ return NULL;
+
+ return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom;
+}
+
+/**
+ * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently
+ * on the GPU
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: true if there are atoms on the GPU for slot js,
+ * false otherwise
+ */
+static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (!katom)
+ return false;
+ if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED ||
+ katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
+ * currently on the GPU
+ * @kbdev: Device pointer
+ *
+ * Return: true if there are any atoms on the GPU, false otherwise
+ */
+static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev)
+{
+ int js;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED)
+ return true;
+ }
+ }
+ return false;
+}
+
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && (katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED))
+ nr++;
+ }
+
+ return nr;
+}
+
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ if (kbase_gpu_inspect(kbdev, js, i))
+ nr++;
+ }
+
+ return nr;
+}
+
+static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js,
+ enum kbase_atom_gpu_rb_state min_rb_state)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && (katom->gpu_rb_state >= min_rb_state))
+ nr++;
+ }
+
+ return nr;
+}
+
+/**
+ * check_secure_atom - Check if the given atom is in the given secure state and
+ * has a ringbuffer state of at least
+ * KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @katom: Atom pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if atom is in the given state, false otherwise
+ */
+static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure)
+{
+ if (katom->gpu_rb_state >=
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
+ ((kbase_jd_katom_is_protected(katom) && secure) ||
+ (!kbase_jd_katom_is_protected(katom) && !secure)))
+ return true;
+
+ return false;
+}
+
+/**
+ * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given
+ * secure state in the ringbuffers of at least
+ * state
+ * KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE
+ * @kbdev: Device pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if any atoms are in the given state, false otherwise
+ */
+static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
+ bool secure)
+{
+ int js, i;
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js, i);
+
+ if (katom) {
+ if (check_secure_atom(katom, secure))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
+{
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* The GPU is being reset - so prevent submission */
+ return 0;
+ }
+
+ return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js);
+}
+
+
+static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
+ int js,
+ struct kbase_jd_atom *katom)
+{
+ /* The most recently checked affinity. Having this at this scope allows
+ * us to guarantee that we've checked the affinity in this function
+ * call.
+ */
+ u64 recently_chosen_affinity = 0;
+ bool chosen_affinity = false;
+ bool retry;
+
+ do {
+ retry = false;
+
+ /* NOTE: The following uses a number of FALLTHROUGHs to optimize
+ * the calls to this function. Ending of the function is
+ * indicated by BREAK OUT */
+ switch (katom->coreref_state) {
+ /* State when job is first attempted to be run */
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+
+ /* Compute affinity */
+ if (false == kbase_js_choose_affinity(
+ &recently_chosen_affinity, kbdev, katom,
+ js)) {
+ /* No cores are currently available */
+ /* *** BREAK OUT: No state transition *** */
+ break;
+ }
+
+ chosen_affinity = true;
+
+ /* Request the cores */
+ kbase_pm_request_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+
+ katom->affinity = recently_chosen_affinity;
+
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ {
+ enum kbase_pm_cores_ready cores_ready;
+
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ cores_ready = kbase_pm_register_inuse_cores(
+ kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to
+ * previous state */
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous
+ * state, retry *** */
+ retry = true;
+ break;
+ }
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Stay in this state and return, to
+ * retry at this state later */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: No state transition
+ * *** */
+ break;
+ }
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ }
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ /* Optimize out choosing the affinity twice in the same
+ * function call */
+ if (chosen_affinity == false) {
+ /* See if the affinity changed since a previous
+ * call. */
+ if (false == kbase_js_choose_affinity(
+ &recently_chosen_affinity,
+ kbdev, katom, js)) {
+ /* No cores are currently available */
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REQUEST_ON_RECHECK_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) recently_chosen_affinity);
+ /* *** BREAK OUT: Transition to lower
+ * state *** */
+ break;
+ }
+ chosen_affinity = true;
+ }
+
+ /* Now see if this requires a different set of cores */
+ if (recently_chosen_affinity != katom->affinity) {
+ enum kbase_pm_cores_ready cores_ready;
+
+ kbase_pm_request_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+
+ /* Register new cores whilst we still hold the
+ * old ones, to minimize power transitions */
+ cores_ready =
+ kbase_pm_register_inuse_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+
+ /* Fixup the state that was reduced by
+ * deref_cores: */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ katom->affinity = recently_chosen_affinity;
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to
+ * previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous
+ * state, retry *** */
+ retry = true;
+ break;
+ }
+ /* Now might be waiting for powerup again, with
+ * a new affinity */
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Return to previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_ON_RECHECK_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Transition to lower
+ * state *** */
+ break;
+ }
+ }
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ KBASE_DEBUG_ASSERT(katom->affinity ==
+ recently_chosen_affinity);
+
+ /* Note: this is where the caller must've taken the
+ * hwaccess_lock */
+
+ /* Check for affinity violations - if there are any,
+ * then we just ask the caller to requeue and try again
+ * later */
+ if (kbase_js_affinity_would_violate(kbdev, js,
+ katom->affinity) != false) {
+ /* Return to previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ /* *** BREAK OUT: Transition to lower state ***
+ */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_AFFINITY_WOULD_VIOLATE,
+ katom->kctx, katom, katom->jc, js,
+ (u32) katom->affinity);
+ break;
+ }
+
+ /* No affinity violations would result, so the cores are
+ * ready */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
+ /* *** BREAK OUT: Cores Ready *** */
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled kbase_atom_coreref_state %d",
+ katom->coreref_state);
+ break;
+ }
+ } while (retry != false);
+
+ return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
+}
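The NOTE at the top of the switch above describes the pattern this file uses repeatedly: each case performs one step, advances coreref_state, then falls through so the atom makes as much forward progress as possible in one call, while a break parks it for a later retry. A minimal standalone sketch of that fall-through state machine pattern (the demo_* names and states are hypothetical, not part of the driver):

enum demo_state {
	DEMO_STATE_REQUEST,	/* resources not yet requested */
	DEMO_STATE_WAIT,	/* requested, waiting for them to be ready */
	DEMO_STATE_READY	/* ready to submit */
};

static bool demo_advance(enum demo_state *state, bool can_request,
			 bool resources_ready)
{
	switch (*state) {
	case DEMO_STATE_REQUEST:
		if (!can_request)
			break;		/* no transition, retry later */
		*state = DEMO_STATE_WAIT;

		/* fall through to attempt the next step immediately */
	case DEMO_STATE_WAIT:
		if (!resources_ready)
			break;		/* stay here, retry later */
		*state = DEMO_STATE_READY;

		/* fall through */
	case DEMO_STATE_READY:
		break;
	}

	return *state == DEMO_STATE_READY;
}

The same structure is used for gpu_rb_state in kbase_backend_slot_update() and for the protected-mode enter/exit sequences below.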
+
+static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ switch (katom->coreref_state) {
+ case KBASE_ATOM_COREREF_STATE_READY:
+ /* State where atom was submitted to the HW - just proceed to
+ * power-down */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ /* *** FALLTHROUGH *** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ /* State where cores were registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ /* State where cores were requested, but not registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Initial state - nothing required */
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled coreref_state: %d",
+ katom->coreref_state);
+ break;
+ }
+
+ katom->affinity = 0;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+}
+
+static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ switch (coreref_state) {
+ case KBASE_ATOM_COREREF_STATE_READY:
+ /* State where atom was submitted to the HW - just proceed to
+ * power-down */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+
+ /* *** FALLTHROUGH *** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ /* State where cores were registered */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+ kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T,
+ affinity);
+
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ /* State where cores were requested, but not registered */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+ kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T,
+ affinity);
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Initial state - nothing required */
+ KBASE_DEBUG_ASSERT(affinity == 0);
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled coreref_state: %d",
+ coreref_state);
+ break;
+ }
+}
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ switch (katom->gpu_rb_state) {
+ case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+ /* Should be impossible */
+ WARN(1, "Attempting to release atom not in ringbuffer\n");
+ break;
+
+ case KBASE_ATOM_GPU_RB_SUBMITTED:
+ /* Inform power management at start/finish of atom so it can
+ * update its GPU utilisation metrics. Mark atom as not
+ * submitted beforehand. */
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+ kbase_pm_metrics_update(kbdev, end_timestamp);
+
+ if (katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom,
+ &kbdev->gpu_props.props.raw_props.js_features
+ [katom->slot_nr]);
+ KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]);
+ KBASE_TLSTREAM_TL_NRET_CTX_LPU(kctx,
+ &kbdev->gpu_props.props.raw_props.js_features
+ [katom->slot_nr]);
+
+ case KBASE_ATOM_GPU_RB_READY:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+ kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
+ katom->affinity);
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+ break;
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+ if (katom->protected_state.enter !=
+ KBASE_ATOM_ENTER_PROTECTED_CHECK ||
+ katom->protected_state.exit !=
+ KBASE_ATOM_EXIT_PROTECTED_CHECK)
+ kbdev->protected_mode_transition = false;
+
+ if (kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2)) {
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ /* Go back to configured model for IPA */
+ kbase_ipa_model_use_configured_locked(kbdev);
+ }
+
+
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+ break;
+ }
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+}
+
+static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ kbase_gpu_release_atom(kbdev, katom, NULL);
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+}
+
+static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ bool slot_busy[3];
+
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return true;
+ slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+ slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+ slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+
+ if ((js == 2 && !(slot_busy[0] || slot_busy[1])) ||
+ (js != 2 && !slot_busy[2]))
+ return true;
+
+ /* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */
+ if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) ||
+ kbase_gpu_atoms_submitted(kbdev, 1) ||
+ backend->rmu_workaround_flag))
+ return false;
+
+ /* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */
+ if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) ||
+ !backend->rmu_workaround_flag))
+ return false;
+
+ backend->rmu_workaround_flag = !backend->rmu_workaround_flag;
+
+ return true;
+}
+
+/**
+ * other_slots_busy - Determine if any job slots other than @js are currently
+ * running atoms
+ * @kbdev: Device pointer
+ * @js: Job slot
+ *
+ * Return: true if any slots other than @js are busy, false otherwise
+ */
+static inline bool other_slots_busy(struct kbase_device *kbdev, int js)
+{
+ int slot;
+
+ for (slot = 0; slot < kbdev->gpu_props.num_job_slots; slot++) {
+ if (slot == js)
+ continue;
+
+ if (kbase_gpu_nr_atoms_on_slot_min(kbdev, slot,
+ KBASE_ATOM_GPU_RB_SUBMITTED))
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
+{
+ return kbdev->protected_mode;
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+ int err = -EINVAL;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot enter protected mode: protected callbacks not specified.\n");
+
+ /*
+ * When entering into protected mode, we must ensure that the
+ * GPU is not operating in coherent mode as well. This is to
+ * ensure that no protected memory can be leaked.
+ */
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+
+ if (kbdev->protected_ops) {
+ /* Switch GPU to protected mode */
+ err = kbdev->protected_ops->protected_mode_enable(
+ kbdev->protected_dev);
+
+ if (err)
+ dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+ err);
+ else
+ kbdev->protected_mode = true;
+ }
+
+ return err;
+}
+
+static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot exit protected mode: protected callbacks not specified.\n");
+
+ if (!kbdev->protected_ops)
+ return -EINVAL;
+
+ /* The protected mode disable callback will be called as part of reset
+ */
+ kbase_reset_gpu_silent(kbdev);
+
+ return 0;
+}
+
+static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+ switch (katom[idx]->protected_state.enter) {
+ case KBASE_ATOM_ENTER_PROTECTED_CHECK:
+ KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev);
+ /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+ * should ensure that we are not already transitioning, and that
+ * there are no atoms currently on the GPU. */
+ WARN_ON(kbdev->protected_mode_transition);
+ WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+ kbdev->protected_mode_transition = true;
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_VINSTR;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
+ if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
+ /*
+ * We can't switch now because
+ * the vinstr core state switch
+ * is not done yet.
+ */
+ return -EAGAIN;
+ }
+
+ /* Use generic model for IPA in protected mode */
+ kbase_ipa_model_use_fallback_locked(kbdev);
+
+ /* Once reaching this point GPU must be
+ * switched to protected mode or vinstr
+ * re-enabled. */
+
+ /*
+ * Not in correct mode, begin protected mode switch.
+ * Entering protected mode requires us to power down the L2,
+ * and drop out of fully coherent mode.
+ */
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
+ /* Avoid unnecessary waiting on non-ACE platforms. */
+ if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
+ if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+ kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+ /*
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
+ return -EAGAIN;
+ }
+ }
+
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+
+ /* No jobs running, so we can switch GPU mode right now. */
+ err = kbase_gpu_protected_mode_enter(kbdev);
+
+ /*
+ * Regardless of result, we are no longer transitioning
+ * the GPU.
+ */
+ kbdev->protected_mode_transition = false;
+ KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
+ if (err) {
+ /*
+ * Failed to switch into protected mode, resume
+ * vinstr core and fail atom.
+ */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order. */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+
+ /* Go back to configured model for IPA */
+ kbase_ipa_model_use_configured_locked(kbdev);
+
+ return -EINVAL;
+ }
+
+ /* Protected mode sanity checks. */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom[idx]) ==
+ kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]),
+ kbase_gpu_in_protected_mode(kbdev));
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+ }
+
+ return 0;
+}
+
+static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+
+ switch (katom[idx]->protected_state.exit) {
+ case KBASE_ATOM_EXIT_PROTECTED_CHECK:
+ KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev);
+ /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+ * should ensure that we are not already transitioning, and that
+ * there are no atoms currently on the GPU. */
+ WARN_ON(kbdev->protected_mode_transition);
+ WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+ /*
+ * Exiting protected mode requires a reset, but first the L2
+ * needs to be powered down to ensure it's not active when the
+ * reset is issued.
+ */
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
+
+ kbdev->protected_mode_transition = true;
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
+ if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+ kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+ /*
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
+ return -EAGAIN;
+ }
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_RESET;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_EXIT_PROTECTED_RESET:
+ /* Issue the reset to the GPU */
+ err = kbase_gpu_protected_mode_reset(kbdev);
+
+ if (err) {
+ kbdev->protected_mode_transition = false;
+
+ /* Failed to exit protected mode, fail atom */
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ /* Use generic model for IPA in protected mode */
+ kbase_ipa_model_use_fallback_locked(kbdev);
+
+ return -EINVAL;
+ }
+
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
+ /* A GPU reset is issued when exiting protected mode. Once the
+ * reset is done all atoms' state will also be reset. For this
+ * reason, if the atom is still in this state we can safely
+ * say that the reset has not completed i.e., we have not
+ * finished exiting protected mode yet.
+ */
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+void kbase_backend_slot_update(struct kbase_device *kbdev)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ struct kbase_jd_atom *katom[2];
+ int idx;
+
+ katom[0] = kbase_gpu_inspect(kbdev, js, 0);
+ katom[1] = kbase_gpu_inspect(kbdev, js, 1);
+ WARN_ON(katom[1] && !katom[0]);
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ bool cores_ready;
+ int ret;
+
+ if (!katom[idx])
+ continue;
+
+ switch (katom[idx]->gpu_rb_state) {
+ case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+ /* Should be impossible */
+ WARN(1, "Attempting to update atom not in ringbuffer\n");
+ break;
+
+ case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+ if (katom[idx]->atom_flags &
+ KBASE_KATOM_FLAG_X_DEP_BLOCKED)
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+ if (kbase_gpu_check_secure_atoms(kbdev,
+ !kbase_jd_katom_is_protected(
+ katom[idx])))
+ break;
+
+ if ((idx == 1) && (kbase_jd_katom_is_protected(
+ katom[0]) !=
+ kbase_jd_katom_is_protected(
+ katom[1])))
+ break;
+
+ if (kbdev->protected_mode_transition)
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+
+ /*
+ * Exiting protected mode must be done before
+ * the references on the cores are taken as
+ * a power down of the L2 is required, which
+ * can't happen after the references for this
+ * atom are taken.
+ */
+
+ if (!kbase_gpu_in_protected_mode(kbdev) &&
+ kbase_jd_katom_is_protected(katom[idx])) {
+ /* Atom needs to transition into protected mode. */
+ ret = kbase_jm_enter_protected_mode(kbdev,
+ katom, idx, js);
+ if (ret)
+ break;
+ } else if (kbase_gpu_in_protected_mode(kbdev) &&
+ !kbase_jd_katom_is_protected(katom[idx])) {
+ /* Atom needs to transition out of protected mode. */
+ ret = kbase_jm_exit_protected_mode(kbdev,
+ katom, idx, js);
+ if (ret)
+ break;
+ }
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+ /* Atom needs no protected mode transition. */
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+ if (katom[idx]->will_fail_event_code) {
+ kbase_gpu_mark_atom_for_return(kbdev,
+ katom[idx]);
+ /* Set EVENT_DONE so this atom will be
+ completed, not unpulled. */
+ katom[idx]->event_code =
+ BASE_JD_EVENT_DONE;
+ /* Only return if head atom or previous
+ * atom already removed - as atoms must
+ * be returned in order. */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+ break;
+ }
+
+ cores_ready =
+ kbasep_js_job_check_ref_cores(kbdev, js,
+ katom[idx]);
+
+ if (katom[idx]->event_code ==
+ BASE_JD_EVENT_PM_EVENT) {
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+ break;
+ }
+
+ if (!cores_ready)
+ break;
+
+ kbase_js_affinity_retain_slot_cores(kbdev, js,
+ katom[idx]->affinity);
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+ if (!kbase_gpu_rmu_workaround(kbdev, js))
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_READY:
+
+ if (idx == 1) {
+ /* Only submit if head atom or previous
+ * atom already submitted */
+ if ((katom[0]->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED &&
+ katom[0]->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
+ break;
+
+ /* If intra-slot serialization in use
+ * then don't submit atom to NEXT slot
+ */
+ if (kbdev->serialize_jobs &
+ KBASE_SERIALIZE_INTRA_SLOT)
+ break;
+ }
+
+ /* If inter-slot serialization in use then don't
+ * submit atom if any other slots are in use */
+ if ((kbdev->serialize_jobs &
+ KBASE_SERIALIZE_INTER_SLOT) &&
+ other_slots_busy(kbdev, js))
+ break;
+
+ if ((kbdev->serialize_jobs &
+ KBASE_SERIALIZE_RESET) &&
+ kbase_reset_gpu_active(kbdev))
+ break;
+
+ /* Check if this job needs the cycle counter
+ * enabled before submission */
+ if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_request_gpu_cycle_counter_l2_is_on(
+ kbdev);
+
+ kbase_job_hw_submit(kbdev, katom[idx], js);
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_SUBMITTED;
+
+ /* Inform power management at start/finish of
+ * atom so it can update its GPU utilisation
+ * metrics. */
+ kbase_pm_metrics_update(kbdev,
+ &katom[idx]->start_timestamp);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_SUBMITTED:
+ /* Atom submitted to HW, nothing else to do */
+ break;
+
+ case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev,
+ katom[idx]);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Warn if PRLAM-8987 affinity restrictions are violated */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) ||
+ kbase_gpu_atoms_submitted(kbdev, 1)) &&
+ kbase_gpu_atoms_submitted(kbdev, 2));
+}
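Several branches above repeat the same guard before dequeuing an atom at ringbuffer index 1: it may only be handed back once the atom at index 0 has already left the ringbuffer, so completions reach the job scheduler in submission order. Factored out purely for illustration (the helper name is hypothetical):

/* Illustrative only: mirrors the "only return if head atom or previous atom
 * already removed" condition used repeatedly in kbase_backend_slot_update().
 */
static bool demo_can_return_in_order(struct kbase_jd_atom *katom[], int idx)
{
	return idx == 0 ||
	       katom[0]->gpu_rb_state == KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
}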
+
+
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ kbase_gpu_enqueue_atom(kbdev, katom);
+ kbase_backend_slot_update(kbdev);
+}
+
+#define HAS_DEP(katom) (katom->pre_dep || katom->atom_flags & \
+ (KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER))
+
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_atom *next_katom;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom = kbase_gpu_inspect(kbdev, js, 0);
+ next_katom = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (next_katom && katom->kctx == next_katom->kctx &&
+ next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
+ HAS_DEP(next_katom) &&
+ (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL)
+ != 0 ||
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL)
+ != 0)) {
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+ next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+
+ KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom,
+ &kbdev->gpu_props.props.raw_props.js_features
+ [katom->slot_nr]);
+ KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as
+ [katom->kctx->as_nr]);
+ KBASE_TLSTREAM_TL_NRET_CTX_LPU(katom->kctx,
+ &kbdev->gpu_props.props.raw_props.js_features
+ [katom->slot_nr]);
+
+ return true;
+ }
+
+ return false;
+}
+
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ u32 completion_code,
+ u64 job_tail,
+ ktime_t *end_timestamp)
+{
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+ struct kbase_context *kctx = katom->kctx;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /*
+ * When a hard-stop is followed close after a soft-stop, the completion
+ * code may be set to STOPPED, even though the job is terminated
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) {
+ if (completion_code == BASE_JD_EVENT_STOPPED &&
+ (katom->atom_flags &
+ KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) {
+ completion_code = BASE_JD_EVENT_TERMINATED;
+ }
+ }
+
+ if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) || (katom->core_req &
+ BASE_JD_REQ_SKIP_CACHE_END)) &&
+ completion_code != BASE_JD_EVENT_DONE &&
+ !(completion_code & BASE_JD_SW_EVENT)) {
+ /* When a job chain fails, on a T60x or when
+ * BASE_JD_REQ_SKIP_CACHE_END is set, the GPU cache is not
+ * flushed. To prevent future evictions causing possible memory
+ * corruption we need to flush the cache manually before any
+ * affected memory gets reused. */
+ katom->need_cache_flush_cores_retained = katom->affinity;
+ kbase_pm_request_cores(kbdev, false, katom->affinity);
+ } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+ if (kbdev->gpu_props.num_core_groups > 1 &&
+ !(katom->affinity &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask
+ ) &&
+ (katom->affinity &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask
+ )) {
+ dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+ katom->need_cache_flush_cores_retained =
+ katom->affinity;
+ kbase_pm_request_cores(kbdev, false,
+ katom->affinity);
+ }
+ }
+
+ katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+ kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
+
+ if (completion_code == BASE_JD_EVENT_STOPPED) {
+ struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+ 0);
+
+ /*
+ * Dequeue next atom from ringbuffers on same slot if required.
+ * This atom will already have been removed from the NEXT
+ * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that
+ * the atoms on this slot are returned in the correct order.
+ */
+ if (next_katom && katom->kctx == next_katom->kctx &&
+ next_katom->sched_priority ==
+ katom->sched_priority) {
+ kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+ kbase_jm_return_atom_to_js(kbdev, next_katom);
+ }
+ } else if (completion_code != BASE_JD_EVENT_DONE) {
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int i;
+
+#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
+ KBASE_TRACE_DUMP(kbdev);
+#endif
+ kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+
+ /*
+ * Remove all atoms on the same context from ringbuffers. This
+ * will not remove atoms that are already on the GPU, as these
+ * are guaranteed not to have fail dependencies on the failed
+ * atom.
+ */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+ struct kbase_jd_atom *katom_idx0 =
+ kbase_gpu_inspect(kbdev, i, 0);
+ struct kbase_jd_atom *katom_idx1 =
+ kbase_gpu_inspect(kbdev, i, 1);
+
+ if (katom_idx0 && katom_idx0->kctx == katom->kctx &&
+ HAS_DEP(katom_idx0) &&
+ katom_idx0->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Dequeue katom_idx0 from ringbuffer */
+ kbase_gpu_dequeue_atom(kbdev, i, end_timestamp);
+
+ if (katom_idx1 &&
+ katom_idx1->kctx == katom->kctx
+ && HAS_DEP(katom_idx1) &&
+ katom_idx0->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Dequeue katom_idx1 from ringbuffer */
+ kbase_gpu_dequeue_atom(kbdev, i,
+ end_timestamp);
+
+ katom_idx1->event_code =
+ BASE_JD_EVENT_STOPPED;
+ kbase_jm_return_atom_to_js(kbdev,
+ katom_idx1);
+ }
+ katom_idx0->event_code = BASE_JD_EVENT_STOPPED;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+
+ } else if (katom_idx1 &&
+ katom_idx1->kctx == katom->kctx &&
+ HAS_DEP(katom_idx1) &&
+ katom_idx1->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Can not dequeue this atom yet - will be
+ * dequeued when atom at idx0 completes */
+ katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
+ kbase_gpu_mark_atom_for_return(kbdev,
+ katom_idx1);
+ }
+ }
+ }
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc,
+ js, completion_code);
+
+ if (job_tail != 0 && job_tail != katom->jc) {
+ bool was_updated = (job_tail != katom->jc);
+
+ /* Some of the job has been executed, so we update the job chain
+ * address to where we should resume from */
+ katom->jc = job_tail;
+ if (was_updated)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx,
+ katom, job_tail, js);
+ }
+
+ /* Only update the event code for jobs that weren't cancelled */
+ if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
+ katom->event_code = (base_jd_event_code)completion_code;
+
+ kbase_device_trace_register_access(kctx, REG_WRITE,
+ JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+ 1 << js);
+
+ /* Complete the job, and start new ones
+ *
+ * Also defer remaining work onto the workqueue:
+ * - Re-queue Soft-stopped jobs
+ * - For any other jobs, queue the job back into the dependency system
+ * - Schedule out the parent context if necessary, and schedule a new
+ * one in.
+ */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ {
+ /* The atom in the HEAD */
+ struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+ 0);
+
+ if (next_katom && next_katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ char js_string[16];
+
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+ js_string,
+ sizeof(js_string)),
+ ktime_to_ns(*end_timestamp),
+ (u32)next_katom->kctx->id, 0,
+ next_katom->work_id);
+ kbdev->hwaccess.backend.slot_rb[js].last_context =
+ next_katom->kctx;
+ } else {
+ char js_string[16];
+
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+ js_string,
+ sizeof(js_string)),
+ ktime_to_ns(ktime_get()), 0, 0,
+ 0);
+ kbdev->hwaccess.backend.slot_rb[js].last_context = 0;
+ }
+ }
+#endif
+
+ if (kbdev->serialize_jobs & KBASE_SERIALIZE_RESET)
+ kbase_reset_gpu_silent(kbdev);
+
+ if (completion_code == BASE_JD_EVENT_STOPPED)
+ katom = kbase_jm_return_atom_to_js(kbdev, katom);
+ else
+ katom = kbase_jm_complete(kbdev, katom, end_timestamp);
+
+ if (katom) {
+ /* Cross-slot dependency has now become runnable. Try to submit
+ * it. */
+
+ /* Check if there are lower priority jobs to soft stop */
+ kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+ kbase_jm_try_kick(kbdev, 1 << katom->slot_nr);
+ }
+
+ /* Job completion may have unblocked other atoms. Try to update all job
+ * slots */
+ kbase_backend_slot_update(kbdev);
+}
+
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Reset should always take the GPU out of protected mode */
+ WARN_ON(kbase_gpu_in_protected_mode(kbdev));
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ int atom_idx = 0;
+ int idx;
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js, atom_idx);
+ bool keep_in_jm_rb = false;
+
+ if (!katom)
+ break;
+ if (katom->protected_state.exit ==
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT)
+ {
+ KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
+
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ /* protected mode sanity checks */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev));
+ KBASE_DEBUG_ASSERT_MSG(
+ (kbase_jd_katom_is_protected(katom) && js == 0) ||
+ !kbase_jd_katom_is_protected(katom),
+ "Protected atom on JS%d not supported", js);
+ }
+ if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED)
+ keep_in_jm_rb = true;
+
+ kbase_gpu_release_atom(kbdev, katom, NULL);
+
+ /*
+ * If the atom wasn't on HW when the reset was issued
+ * then leave it in the RB and next time we're kicked
+ * it will be processed again from the starting state.
+ */
+ if (keep_in_jm_rb) {
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->affinity = 0;
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+ /* As the atom was not removed, increment the
+ * index so that we read the correct atom in the
+ * next iteration. */
+ atom_idx++;
+ continue;
+ }
+
+ /*
+ * The atom was on the HW when the reset was issued
+ * all we can do is fail the atom.
+ */
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ kbase_jm_complete(kbdev, katom, end_timestamp);
+ }
+ }
+
+ kbdev->protected_mode_transition = false;
+}
+
+static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action)
+{
+ u32 hw_action = action & JS_COMMAND_MASK;
+
+ kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
+ kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action,
+ katom->core_req, katom);
+ katom->kctx->blocked_js[js][katom->sched_priority] = true;
+}
+
+static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ u32 action,
+ bool disjoint)
+{
+ katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_gpu_mark_atom_for_return(kbdev, katom);
+ katom->kctx->blocked_js[katom->slot_nr][katom->sched_priority] = true;
+
+ if (disjoint)
+ kbase_job_check_enter_disjoint(kbdev, action, katom->core_req,
+ katom);
+}
+
+static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
+{
+ if (katom->x_post_dep) {
+ struct kbase_jd_atom *dep_atom = katom->x_post_dep;
+
+ if (dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB &&
+ dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS)
+ return dep_atom->slot_nr;
+ }
+ return -1;
+}
+
+static void kbase_job_evicted(struct kbase_jd_atom *katom)
+{
+ kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom,
+ katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
+}
+
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action)
+{
+ struct kbase_jd_atom *katom_idx0;
+ struct kbase_jd_atom *katom_idx1;
+
+ bool katom_idx0_valid, katom_idx1_valid;
+
+ bool ret = false;
+
+ int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1;
+ int prio_idx0 = 0, prio_idx1 = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom_idx0 = kbase_gpu_inspect(kbdev, js, 0);
+ katom_idx1 = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (katom_idx0)
+ prio_idx0 = katom_idx0->sched_priority;
+ if (katom_idx1)
+ prio_idx1 = katom_idx1->sched_priority;
+
+ if (katom) {
+ katom_idx0_valid = (katom_idx0 == katom);
+ /* If idx0 is to be removed and idx1 is on the same context,
+ * then idx1 must also be removed otherwise the atoms might be
+ * returned out of order */
+ if (katom_idx1)
+ katom_idx1_valid = (katom_idx1 == katom) ||
+ (katom_idx0_valid &&
+ (katom_idx0->kctx ==
+ katom_idx1->kctx));
+ else
+ katom_idx1_valid = false;
+ } else {
+ katom_idx0_valid = (katom_idx0 &&
+ (!kctx || katom_idx0->kctx == kctx));
+ katom_idx1_valid = (katom_idx1 &&
+ (!kctx || katom_idx1->kctx == kctx) &&
+ prio_idx0 == prio_idx1);
+ }
+
+ if (katom_idx0_valid)
+ stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0);
+ if (katom_idx1_valid)
+ stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1);
+
+ if (katom_idx0_valid) {
+ if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Simple case - just dequeue and return */
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ if (katom_idx1_valid) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ katom_idx1->event_code =
+ BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx1);
+ katom_idx1->kctx->blocked_js[js][prio_idx1] =
+ true;
+ }
+
+ katom_idx0->event_code =
+ BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+ katom_idx0->kctx->blocked_js[js][prio_idx0] = true;
+ } else {
+ /* katom_idx0 is on GPU */
+ if (katom_idx1 && katom_idx1->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* katom_idx0 and katom_idx1 are on GPU */
+
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT), NULL) == 0) {
+ /* idx0 has already completed - stop
+ * idx1 if needed */
+ if (katom_idx1_valid) {
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ } else {
+ /* idx1 is in NEXT registers - attempt
+ * to remove */
+ kbase_reg_write(kbdev,
+ JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+
+ if (kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_LO), NULL)
+ != 0 ||
+ kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_HI), NULL)
+ != 0) {
+ /* idx1 removed successfully,
+ * will be handled in IRQ */
+ kbase_job_evicted(katom_idx1);
+ kbase_gpu_remove_atom(kbdev,
+ katom_idx1,
+ action, true);
+ stop_x_dep_idx1 =
+ should_stop_x_dep_slot(katom_idx1);
+
+ /* stop idx0 if still on GPU */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx0,
+ action);
+ ret = true;
+ } else if (katom_idx1_valid) {
+ /* idx0 has already completed,
+ * stop idx1 if needed */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ }
+ } else if (katom_idx1_valid) {
+ /* idx1 not on GPU but must be dequeued */
+
+ /* idx1 will be handled in IRQ */
+ kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+ false);
+ /* stop idx0 */
+ /* This will be repeated for anything removed
+ * from the next registers, since their normal
+ * flow was also interrupted, and this function
+ * might not enter disjoint state e.g. if we
+ * don't actually do a hard stop on the head
+ * atom */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+ action);
+ ret = true;
+ } else {
+ /* no atom in idx1 */
+ /* just stop idx0 */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+ action);
+ ret = true;
+ }
+ }
+ } else if (katom_idx1_valid) {
+ if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Mark for return */
+ /* idx1 will be returned once idx0 completes */
+ kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+ false);
+ } else {
+ /* idx1 is on GPU */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT), NULL) == 0) {
+ /* idx0 has already completed - stop idx1 */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx1,
+ action);
+ ret = true;
+ } else {
+ /* idx1 is in NEXT registers - attempt to
+ * remove */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_LO), NULL) != 0 ||
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_HI), NULL) != 0) {
+ /* idx1 removed successfully, will be
+ * handled in IRQ once idx0 completes */
+ kbase_job_evicted(katom_idx1);
+ kbase_gpu_remove_atom(kbdev, katom_idx1,
+ action,
+ false);
+ } else {
+ /* idx0 has already completed - stop
+ * idx1 */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ }
+ }
+ }
+
+
+ if (stop_x_dep_idx0 != -1)
+ kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0,
+ NULL, action);
+
+ if (stop_x_dep_idx1 != -1)
+ kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1,
+ NULL, action);
+
+ return ret;
+}
+
+void kbase_gpu_cacheclean(struct kbase_device *kbdev)
+{
+ /* Limit the number of loops to avoid a hang if the interrupt is missed
+ */
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+ mutex_lock(&kbdev->cacheclean_lock);
+
+ /* use GPU_COMMAND completion solution */
+ /* clean & invalidate the caches */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+
+ /* wait for cache flush to complete before continuing */
+ while (--max_loops &&
+ (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ CLEAN_CACHES_COMPLETED) == 0)
+ ;
+
+ /* clear the CLEAN_CACHES_COMPLETED irq */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u,
+ CLEAN_CACHES_COMPLETED);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
+ CLEAN_CACHES_COMPLETED, NULL);
+ KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_CLEANING,
+ "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
+
+ mutex_unlock(&kbdev->cacheclean_lock);
+}
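kbase_gpu_cacheclean() above polls with a bounded loop rather than sleeping on the CLEAN_CACHES_COMPLETED interrupt, so a missed interrupt degrades into a timed-out poll instead of a hang. The general pattern, as a sketch with hypothetical names:

/* Poll for @bit in the value returned by @read_status, giving up after
 * @max_loops iterations. Returns false if the bit never appeared.
 */
static bool demo_poll_status_bit(u32 (*read_status)(void *ctx), void *ctx,
				 u32 bit, u32 max_loops)
{
	while (--max_loops && !(read_status(ctx) & bit))
		;

	return max_loops != 0;
}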
+
+void kbase_backend_cacheclean(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ if (katom->need_cache_flush_cores_retained) {
+ unsigned long flags;
+
+ kbase_gpu_cacheclean(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_unrequest_cores(kbdev, false,
+ katom->need_cache_flush_cores_retained);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ katom->need_cache_flush_cores_retained = 0;
+ }
+}
+
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ /*
+ * If cache flush required due to HW workaround then perform the flush
+ * now
+ */
+ kbase_backend_cacheclean(kbdev, katom);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
+ (katom->core_req & BASE_JD_REQ_FS) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+ !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+ dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+ if (kbasep_10969_workaround_clamp_coordinates(katom)) {
+ /* The job had a TILE_RANGE_FAULT after it was soft-stopped.
+ * Due to an HW issue we try to execute the job again.
+ */
+ dev_dbg(kbdev->dev,
+ "Clamping has been executed, try to rerun the job\n"
+ );
+ katom->event_code = BASE_JD_EVENT_STOPPED;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+ }
+ }
+
+ /* Clear the coreref_state now - while check_deref_cores() may not have
+ * been called yet, the caller will have taken a copy of this field. If
+ * this is not done, then if the atom is re-scheduled (following a soft
+ * stop) then the core reference would not be retaken. */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->affinity = 0;
+}
+
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity,
+ coreref_state);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (!kbdev->pm.active_count) {
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+ kbase_pm_update_active(kbdev);
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+ }
+}
+
+void kbase_gpu_dump_slots(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ int js;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ int idx;
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js,
+ idx);
+
+ if (katom)
+ dev_info(kbdev->dev,
+ " js%d idx%d : katom=%p gpu_rb_state=%d\n",
+ js, idx, katom, katom->gpu_rb_state);
+ else
+ dev_info(kbdev->dev, " js%d idx%d : empty\n",
+ js, idx);
+ }
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.h
new file mode 100644
index 000000000000..1e0e05ad3ea4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_jm_rb.h
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_H_
+#define _KBASE_HWACCESS_GPU_H_
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/**
+ * kbase_gpu_irq_evict - Evict an atom from a NEXT slot
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot to evict from
+ *
+ * Evict the atom in the NEXT slot for the specified job slot. This function is
+ * called from the job complete IRQ handler when the previous job has failed.
+ *
+ * Return: true if job evicted from NEXT registers, false otherwise
+ */
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_gpu_complete_hw - Complete an atom on job slot js
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot that has completed
+ * @completion_code: Event code from job that has completed
+ * @job_tail: The tail address from the hardware if the job has partially
+ * completed
+ * @end_timestamp: Time of completion
+ */
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ u32 completion_code,
+ u64 job_tail,
+ ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ * @idx: Index into ringbuffer. 0 is the job currently running on
+ * the slot, 1 is the job waiting, all other values are invalid.
+ * Return: The atom at that position in the ringbuffer
+ * or NULL if no atom present
+ */
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+ int idx);
+
+/**
+ * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers
+ *
+ * @kbdev: Device pointer
+ */
+void kbase_gpu_dump_slots(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_GPU_H_ */
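A short usage sketch for kbase_gpu_inspect() as declared above: walk one slot's ringbuffer while holding hwaccess_lock, which the backend requires for ringbuffer access. The helper name is illustrative, not part of the driver; the loop bound of 2 assumes the backend's two-entry ringbuffer.

static void demo_dump_one_slot(struct kbase_device *kbdev, int js)
{
	int idx;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	for (idx = 0; idx < 2; idx++) {
		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, idx);

		if (!katom)
			break;	/* ringbuffer entries are contiguous */

		dev_dbg(kbdev->dev, "js%d idx%d gpu_rb_state=%d\n",
			js, idx, katom->gpu_rb_state);
	}
}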
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.c
new file mode 100644
index 000000000000..54d8ddd80097
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.c
@@ -0,0 +1,303 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel affinity manager APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_js_affinity.h"
+#include "mali_kbase_hw.h"
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+
+bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev,
+ int js)
+{
+ /*
+ * Here are the reasons for using job slot 2:
+ * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose)
+ * - In absence of the above, then:
+ * - Atoms with BASE_JD_REQ_COHERENT_GROUP
+ * - But, only when there aren't contexts with
+ * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on
+ * all cores on slot 1 could be blocked by those using a coherent group
+ * on slot 2
+ * - And, only when you actually have 2 or more coregroups - if you
+ * only have 1 coregroup, then having jobs for slot 2 implies they'd
+ * also be for slot 1, meaning you'll get interference from them. Jobs
+ * able to run on slot 2 could also block jobs that can only run on
+ * slot 1 (tiler jobs)
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return true;
+
+ if (js != 2)
+ return true;
+
+ /* Only deal with js==2 now: */
+ if (kbdev->gpu_props.num_core_groups > 1) {
+ /* Only use slot 2 in the 2+ coregroup case */
+ if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev,
+ KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) ==
+ false) {
+ /* ...But only when we *don't* have atoms that run on
+ * all cores */
+
+ /* No specific check for BASE_JD_REQ_COHERENT_GROUP
+ * atoms - the policy will sort that out */
+ return true;
+ }
+ }
+
+ /* The above checks failed, so we should not use slot 2 */
+ return false;
+}
+
+/*
+ * Until a deeper rework of the job scheduler, power manager and affinity
+ * manager has been decided on, this function is an intermediate step that
+ * assumes:
+ * - all working cores will be powered on when this is called.
+ * - the largest current configuration is 2 core groups.
+ * - it has been decided not to use hardcoded values, so the low and high
+ *   cores in a core split will be evenly distributed.
+ * - odd combinations of core requirements have been filtered out and do
+ *   not get to this function (e.g. CS+T+NSS is not supported here).
+ * - this function is frequently called and can be optimized (see notes in
+ *   the loops), but as the functionality will likely be modified,
+ *   optimization has not been addressed.
+ */
+bool kbase_js_choose_affinity(u64 * const affinity,
+ struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, int js)
+{
+ base_jd_core_req core_req = katom->core_req;
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+ u64 core_availability_mask;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);
+
+ /*
+ * If no cores are currently available (core availability policy is
+ * transitioning) then fail.
+ */
+ if (0 == core_availability_mask) {
+ *affinity = 0;
+ return false;
+ }
+
+ KBASE_DEBUG_ASSERT(js >= 0);
+
+ if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+ BASE_JD_REQ_T) {
+ /* If the hardware supports XAFFINITY then we'll only enable
+ * the tiler (which is the default so this is a no-op),
+ * otherwise enable shader core 0. */
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ *affinity = 1;
+ else
+ *affinity = 0;
+
+ return true;
+ }
+
+ if (1 == kbdev->gpu_props.num_cores) {
+ /* trivial case only one core, nothing to do */
+ *affinity = core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ } else {
+ if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+ if (js == 0 || num_core_groups == 1) {
+ /* js[0] and single-core-group systems just get
+ * the first core group */
+ *affinity =
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask
+ & core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ } else {
+ /* js[1], js[2] use core groups 0, 1 for
+ * dual-core-group systems */
+ u32 core_group_idx = ((u32) js) - 1;
+
+ KBASE_DEBUG_ASSERT(core_group_idx <
+ num_core_groups);
+ *affinity =
+ kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask
+ & core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+
+ /* If the job is specifically targeting core
+ * group 1 and the core availability policy is
+ * keeping that core group off, then fail */
+ if (*affinity == 0 && core_group_idx == 1 &&
+ kbdev->pm.backend.cg1_disabled
+ == true)
+ katom->event_code =
+ BASE_JD_EVENT_PM_EVENT;
+ }
+ } else {
+ /* All cores are available when no core split is
+ * required */
+ *affinity = core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ }
+ }
+
+ /*
+ * If no cores are currently available in the desired core group(s)
+ * (core availability policy is transitioning) then fail.
+ */
+ if (*affinity == 0)
+ return false;
+
+ /* Enable core 0 if tiler required for hardware without XAFFINITY
+ * support (notes above) */
+ if (core_req & BASE_JD_REQ_T) {
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ *affinity = *affinity | 1;
+ }
+
+ return true;
+}
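A worked sketch of the core-split logic above for a hypothetical dual-core-group GPU: group 0 owns cores 0-3 (mask 0x0f), group 1 owns cores 4-7 (mask 0xf0), all cores are available, and the debug core mask imposes no restriction. The masks and helper name are illustrative, not taken from a real configuration.

static u64 demo_coherent_group_affinity(int js)
{
	const u64 group_core_mask[2] = { 0x0f, 0xf0 };	/* hypothetical */
	const u64 core_availability_mask = 0xff;	/* hypothetical */
	const u64 debug_core_mask = 0xff;		/* hypothetical */
	u32 core_group_idx;

	/* js0 (and single-core-group systems) always take group 0 */
	core_group_idx = (js == 0) ? 0 : (u32)js - 1;

	/* e.g. a BASE_JD_REQ_COHERENT_GROUP atom on js1 -> 0x0f, js2 -> 0xf0;
	 * an atom without a core split would simply get 0xff here.
	 */
	return group_core_mask[core_group_idx] &
	       core_availability_mask & debug_core_mask;
}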
+
+static inline bool kbase_js_affinity_is_violating(
+ struct kbase_device *kbdev,
+ u64 *affinities)
+{
+ /* This implementation checks whether the two slots involved in Generic
+ * thread creation have intersecting affinity. This is due to micro-
+ * architectural issues where a job in slot A targeting cores used by
+ * slot B could prevent the job in slot B from making progress until the
+ * job in slot A has completed.
+ */
+ u64 affinity_set_left;
+ u64 affinity_set_right;
+ u64 intersection;
+
+ KBASE_DEBUG_ASSERT(affinities != NULL);
+
+ affinity_set_left = affinities[1];
+
+ affinity_set_right = affinities[2];
+
+ /* A violation occurs when any bit in the left_set is also in the
+ * right_set */
+ intersection = affinity_set_left & affinity_set_right;
+
+ return (bool) (intersection != (u64) 0u);
+}
+
+bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 new_affinities[BASE_JM_MAX_NR_SLOTS];
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities,
+ sizeof(js_devdata->runpool_irq.slot_affinities));
+
+ new_affinities[js] |= affinity;
+
+ return kbase_js_affinity_is_violating(kbdev, new_affinities);
+}
+
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity)
+ == false);
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ cnt =
+ ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (cnt == 1)
+ js_devdata->runpool_irq.slot_affinities[js] |= bit;
+
+ cores &= ~bit;
+ }
+}
+
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ KBASE_DEBUG_ASSERT(
+ js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0);
+
+ cnt =
+ --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (0 == cnt)
+ js_devdata->runpool_irq.slot_affinities[js] &= ~bit;
+
+ cores &= ~bit;
+ }
+}
+
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int slot_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ for (slot_nr = 0; slot_nr < 3; ++slot_nr)
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL,
+ NULL, 0u, slot_nr,
+ (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
+}
+#endif /* KBASE_TRACE_ENABLE */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.h
new file mode 100644
index 000000000000..35d9781ae092
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_affinity.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Affinity Manager internal APIs.
+ */
+
+#ifndef _KBASE_JS_AFFINITY_H_
+#define _KBASE_JS_AFFINITY_H_
+
+/**
+ * kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to
+ * submit a job to a particular job slot in the current status
+ *
+ * @kbdev: The kbase device structure of the device
+ * @js: Job slot number to check for allowance
+ *
+ * Will check if submitting to the given job slot is allowed in the current
+ * status. For example, using job slot 2 while in the soft-stoppable state with
+ * only one coregroup is not allowed by the policy. This function should be
+ * called prior to submitting a job to a slot to make sure policy rules are not
+ * violated.
+ *
+ * The following locking conditions are made on the caller
+ * - it must hold hwaccess_lock
+ */
+bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_js_choose_affinity - Compute affinity for a given job.
+ *
+ * @affinity: Affinity bitmap computed
+ * @kbdev: The kbase device structure of the device
+ * @katom: Job chain for which the affinity is to be computed
+ * @js: Slot to which the job chain is being submitted
+ *
+ * Currently assumes an all-on/all-off power management policy.
+ * Also assumes there is at least one core with tiler available.
+ *
+ * Returns true if a valid affinity was chosen, false if
+ * no cores were available.
+ */
+bool kbase_js_choose_affinity(u64 * const affinity,
+ struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js);
+
+/**
+ * kbase_js_affinity_would_violate - Determine whether a proposed affinity on
+ * job slot @js would cause a violation of affinity restrictions.
+ *
+ * @kbdev: Kbase device structure
+ * @js: The job slot to test
+ * @affinity: The affinity mask to test
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ *
+ * Return: true if the affinity would violate the restrictions
+ */
+bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
+ u64 affinity);
+
+/**
+ * kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by
+ * a slot
+ *
+ * @kbdev: Kbase device structure
+ * @js: The job slot retaining the cores
+ * @affinity: The cores to retain
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ */
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity);
+
+/**
+ * kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used
+ * by a slot
+ *
+ * @kbdev: Kbase device structure
+ * @js: Job slot
+ * @affinity: Bit mask of core to be released
+ *
+ * Cores must be released as soon as a job is dequeued from a slot's 'submit
+ * slots', and before another job is submitted to those slots. Otherwise, the
+ * refcount could exceed the maximum number submittable to a slot,
+ * %BASE_JM_SUBMIT_SLOTS.
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ */
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity);
+
+/**
+ * kbase_js_debug_log_current_affinities - log the current affinities
+ *
+ * @kbdev: Kbase device structure
+ *
+ * Output to the Trace log the current tracked affinities on all slots
+ */
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev);
+#else /* KBASE_TRACE_ENABLE */
+static inline void
+kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
+{
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+#endif /* _KBASE_JS_AFFINITY_H_ */
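Editor's note: the header above only declares the affinity-tracking entry points, so the intended call ordering is easiest to see end to end. Below is an illustrative sketch only (not part of the patch), written as driver-internal code with mali_kbase.h assumed included; the function name example_track_affinity is hypothetical and the actual ringbuffer submission is elided.

/* Illustrative sketch (not part of the patch): expected call ordering for the
 * affinity API declared above. Assumes kbdev, katom and js come from the
 * normal submission path; job submission itself is elided.
 */
static void example_track_affinity(struct kbase_device *kbdev,
				   struct kbase_jd_atom *katom, int js)
{
	unsigned long flags;
	u64 affinity;

	/* All of the calls below require hwaccess_lock to be held */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	if (kbase_js_choose_affinity(&affinity, kbdev, katom, js) &&
	    !kbase_js_affinity_would_violate(kbdev, js, affinity)) {
		/* Track the cores for as long as the atom occupies the slot */
		kbase_js_affinity_retain_slot_cores(kbdev, js, affinity);

		/* ... submit the atom to slot js here ... */

		/* Once the atom is dequeued from the slot's submit slots */
		kbase_js_affinity_release_slot_cores(kbdev, js, affinity);
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}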
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_backend.c
new file mode 100644
index 000000000000..a8c1af23a369
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_backend.c
@@ -0,0 +1,356 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+
+/*
+ * Define for when dumping is enabled.
+ * Whether dumping is enabled for a particular level is down to the integrator,
+ * so this should not be based on the instrumentation level. However, it is
+ * used for now as otherwise the cinstr headers would be needed.
+ */
+#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)
+
+/*
+ * Hold the runpool_mutex for this
+ */
+static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ s8 nr_running_ctxs;
+
+ lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+
+ /* Timer must stop if we are suspending */
+ if (backend->suspend_timer)
+ return false;
+
+ /* nr_contexts_pullable is updated with the runpool_mutex. However, the
+ * locking in the caller gives us a barrier that ensures
+ * nr_contexts_pullable is up-to-date for reading */
+ nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (kbdev->js_data.softstop_always) {
+ /* Debug support for allowing soft-stop on a single context */
+ return true;
+ }
+#endif /* CONFIG_MALI_DEBUG */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
+ /* Timeouts would have to be 4x longer (due to micro-
+ * architectural design) to support OpenCL conformance tests, so
+ * only run the timer when there's:
+ * - 2 or more CL contexts
+ * - 1 or more GLES contexts
+ *
+ * NOTE: A context that has both Compute and Non-Compute
+ * jobs will be treated as an OpenCL context (hence, we
+ * don't check KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+ */
+ {
+ s8 nr_compute_ctxs =
+ kbasep_js_ctx_attr_count_on_runpool(kbdev,
+ KBASEP_JS_CTX_ATTR_COMPUTE);
+ s8 nr_noncompute_ctxs = nr_running_ctxs -
+ nr_compute_ctxs;
+
+ return (bool) (nr_compute_ctxs >= 2 ||
+ nr_noncompute_ctxs > 0);
+ }
+ } else {
+ /* Run the timer callback whenever you have at least 1 context
+ */
+ return (bool) (nr_running_ctxs > 0);
+ }
+}
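Editor's note, concretely: as implemented above, on hardware with BASE_HW_ISSUE_9435 a single running OpenCL context (one compute context and no non-compute contexts) keeps the scheduling timer off, while either a second compute context or any GLES context turns it on; on other hardware any running context is enough.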
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_backend_data *backend;
+ int s;
+ bool reset_needed = false;
+
+ KBASE_DEBUG_ASSERT(timer != NULL);
+
+ backend = container_of(timer, struct kbase_backend_data,
+ scheduling_timer);
+ kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
+ js_devdata = &kbdev->js_data;
+
+ /* Loop through the slots */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
+ struct kbase_jd_atom *atom = NULL;
+
+ if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
+ atom = kbase_gpu_inspect(kbdev, s, 0);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+ }
+
+ if (atom != NULL) {
+ /* The current version of the model doesn't support
+ * Soft-Stop */
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+ u32 ticks = atom->ticks++;
+
+#if !CINSTR_DUMPING_ENABLED
+ u32 soft_stop_ticks, hard_stop_ticks,
+ gpu_reset_ticks;
+ if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ soft_stop_ticks =
+ js_devdata->soft_stop_ticks_cl;
+ hard_stop_ticks =
+ js_devdata->hard_stop_ticks_cl;
+ gpu_reset_ticks =
+ js_devdata->gpu_reset_ticks_cl;
+ } else {
+ soft_stop_ticks =
+ js_devdata->soft_stop_ticks;
+ hard_stop_ticks =
+ js_devdata->hard_stop_ticks_ss;
+ gpu_reset_ticks =
+ js_devdata->gpu_reset_ticks_ss;
+ }
+
+ /* If timeouts have been changed then ensure
+ * that atom tick count is not greater than the
+ * new soft_stop timeout. This ensures that
+ * atoms do not miss any of the timeouts due to
+ * races between this worker and the thread
+ * changing the timeouts. */
+ if (backend->timeouts_updated &&
+ ticks > soft_stop_ticks)
+ ticks = atom->ticks = soft_stop_ticks;
+
+ /* Job is Soft-Stoppable */
+ if (ticks == soft_stop_ticks) {
+ int disjoint_threshold =
+ KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+ u32 softstop_flags = 0u;
+ /* Job has been scheduled for at least
+ * js_devdata->soft_stop_ticks ticks.
+ * Soft stop the slot so we can run
+ * other jobs.
+ */
+ dev_dbg(kbdev->dev, "Soft-stop");
+#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+ /* nr_user_contexts_running is updated
+ * with the runpool_mutex, but we can't
+ * take that here.
+ *
+ * However, if it's about to be
+ * increased then the new context can't
+ * run any jobs until they take the
+ * hwaccess_lock, so it's OK to observe
+ * the older value.
+ *
+ * Similarly, if it's about to be
+ * decreased, the last job from another
+ * context has already finished, so it's
+ * not too bad that we observe the older
+ * value and register a disjoint event
+ * when we try soft-stopping */
+ if (js_devdata->nr_user_contexts_running
+ >= disjoint_threshold)
+ softstop_flags |=
+ JS_COMMAND_SW_CAUSES_DISJOINT;
+
+ kbase_job_slot_softstop_swflags(kbdev,
+ s, atom, softstop_flags);
+#endif
+ } else if (ticks == hard_stop_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->hard_stop_ticks_ss ticks.
+ * It should have been soft-stopped by
+ * now. Hard stop the slot.
+ */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ int ms =
+ js_devdata->scheduling_period_ns
+ / 1000000u;
+ dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+ (unsigned long)ticks,
+ (unsigned long)ms);
+ kbase_job_slot_hardstop(atom->kctx, s,
+ atom);
+#endif
+ } else if (ticks == gpu_reset_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->gpu_reset_ticks_ss ticks.
+ * It should have left the GPU by now.
+ * Signal that the GPU needs to be
+ * reset.
+ */
+ reset_needed = true;
+ }
+#else /* !CINSTR_DUMPING_ENABLED */
+ /* NOTE: During CINSTR_DUMPING_ENABLED, we use
+ * the alternate timeouts, which makes the hard-
+ * stop and GPU reset timeout much longer. We
+ * also ensure that we don't soft-stop at all.
+ */
+ if (ticks == js_devdata->soft_stop_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->soft_stop_ticks. We do
+ * not soft-stop during
+ * CINSTR_DUMPING_ENABLED, however.
+ */
+ dev_dbg(kbdev->dev, "Soft-stop");
+ } else if (ticks ==
+ js_devdata->hard_stop_ticks_dumping) {
+ /* Job has been scheduled for at least
+ * js_devdata->hard_stop_ticks_dumping
+ * ticks. Hard stop the slot.
+ */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ int ms =
+ js_devdata->scheduling_period_ns
+ / 1000000u;
+ dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+ (unsigned long)ticks,
+ (unsigned long)ms);
+ kbase_job_slot_hardstop(atom->kctx, s,
+ atom);
+#endif
+ } else if (ticks ==
+ js_devdata->gpu_reset_ticks_dumping) {
+ /* Job has been scheduled for at least
+ * js_devdata->gpu_reset_ticks_dumping
+ * ticks. It should have left the GPU by
+ * now. Signal that the GPU needs to be
+ * reset.
+ */
+ reset_needed = true;
+ }
+#endif /* !CINSTR_DUMPING_ENABLED */
+ }
+ }
+ }
+#if KBASE_GPU_RESET_EN
+ if (reset_needed) {
+ dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issueing GPU soft-reset to resolve.");
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev))
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ /* The timer is re-issued if there are contexts in the run-pool */
+
+ if (backend->timer_running)
+ hrtimer_start(&backend->scheduling_timer,
+ HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+ HRTIMER_MODE_REL);
+
+ backend->timeouts_updated = false;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ unsigned long flags;
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ if (!timer_callback_should_run(kbdev)) {
+ /* Take spinlock to force synchronisation with timer */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->timer_running = false;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ /* From now on, return value of timer_callback_should_run() will
+ * also cause the timer to not requeue itself. Its return value
+ * cannot change, because it depends on variables updated with
+ * the runpool_mutex held, which the caller of this must also
+ * hold */
+ hrtimer_cancel(&backend->scheduling_timer);
+ }
+
+ if (timer_callback_should_run(kbdev) && !backend->timer_running) {
+ /* Take spinlock to force synchronisation with timer */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->timer_running = true;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ hrtimer_start(&backend->scheduling_timer,
+ HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+ HRTIMER_MODE_REL);
+
+ KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
+ 0u);
+ }
+}
+
+int kbase_backend_timer_init(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ backend->scheduling_timer.function = timer_callback;
+
+ backend->timer_running = false;
+
+ return 0;
+}
+
+void kbase_backend_timer_term(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ hrtimer_cancel(&backend->scheduling_timer);
+}
+
+void kbase_backend_timer_suspend(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->suspend_timer = true;
+
+ kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timer_resume(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->suspend_timer = false;
+
+ kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->timeouts_updated = true;
+}
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_internal.h
new file mode 100644
index 000000000000..3f53779c6747
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_js_internal.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#ifndef _KBASE_JS_BACKEND_H_
+#define _KBASE_JS_BACKEND_H_
+
+/**
+ * kbase_backend_timer_init() - Initialise the JS scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called at driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_backend_timer_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_term() - Terminate the JS scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called at driver termination
+ */
+void kbase_backend_timer_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling
+ * timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on suspend, after the active count has reached
+ * zero. This is required as the timer may have been started on job submission
+ * to the job scheduler, but before jobs are submitted to the GPU.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_suspend(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
+ * scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on resume. Note that it is not guaranteed to
+ * re-start the timer, only to evaluate whether it should be re-started.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_resume(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JS_BACKEND_H_ */
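Editor's note: the declarations above imply a lifecycle (init at driver initialisation, suspend/resume under runpool_mutex once the active count hits zero, term at driver termination). The following is an illustrative sketch only, not part of the patch; the function name example_timer_lifecycle is hypothetical and the surrounding probe/remove and PM paths are elided.

/* Illustrative sketch (not part of the patch): ordering of the scheduling
 * timer lifecycle calls declared above. Error handling is minimal.
 */
static int example_timer_lifecycle(struct kbase_device *kbdev)
{
	int err;

	err = kbase_backend_timer_init(kbdev);	/* at driver initialisation */
	if (err)
		return err;

	/* ... normal operation: the timer is re-evaluated whenever the
	 * context count changes, via kbase_backend_ctx_count_changed() ... */

	mutex_lock(&kbdev->js_data.runpool_mutex);
	kbase_backend_timer_suspend(kbdev);	/* once the active count is zero */
	kbase_backend_timer_resume(kbdev);	/* on resume */
	mutex_unlock(&kbdev->js_data.runpool_mutex);

	kbase_backend_timer_term(kbdev);	/* at driver termination */
	return 0;
}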
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.c
new file mode 100644
index 000000000000..aa1817c8bca9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -0,0 +1,401 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/bitops.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_tlstream.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+ u32 num_pages)
+{
+ u64 region;
+
+ /* can't lock a zero sized range */
+ KBASE_DEBUG_ASSERT(num_pages);
+
+ region = pfn << PAGE_SHIFT;
+ /*
+ * fls returns (given the ASSERT above):
+ * 1 .. 32
+ *
+ * 10 + fls(num_pages)
+ * results in the range (11 .. 42)
+ */
+
+ /* Gracefully handle num_pages being zero, in case the assert above is compiled out */
+ if (0 == num_pages) {
+ region |= 11;
+ } else {
+ u8 region_width;
+
+ region_width = 10 + fls(num_pages);
+ if (num_pages != (1ul << (region_width - 11))) {
+ /* not pow2, so must go up to the next pow2 */
+ region_width += 1;
+ }
+ KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+ KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+ region |= region_width;
+ }
+
+ return region;
+}
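Editor's note, worked example of the width encoding above: for num_pages = 96, fls(96) = 7, so region_width starts at 10 + 7 = 17; since 96 is not a power of two (1 << (17 - 11) = 64 != 96) it is rounded up to 18. For a power-of-two count such as num_pages = 64 the initial value (17) is kept unchanged.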
+
+static int wait_ready(struct kbase_device *kbdev,
+ unsigned int as_nr, struct kbase_context *kctx)
+{
+ unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+ u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending. Do not log remaining register accesses. */
+ while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
+ val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);
+
+ if (max_loops == 0) {
+ dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+ return -1;
+ }
+
+ /* If waiting in loop was performed, log last read value. */
+ if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
+ kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+
+ return 0;
+}
+
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
+ struct kbase_context *kctx)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(kbdev, as_nr, kctx);
+ if (status == 0)
+ kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
+ kctx);
+
+ return status;
+}
+
+static void validate_protected_page_fault(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ /* GPUs which support (native) protected mode shall not report page
+ * fault addresses unless protected debug mode is supported and
+ * turned on */
+ u32 protected_debug_mode = 0;
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
+ return;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+ protected_debug_mode = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_STATUS),
+ kctx) & GPU_DBGEN;
+ }
+
+ if (!protected_debug_mode) {
+ /* fault_addr should never be reported in protected mode.
+ * However, we just continue by printing an error message */
+ dev_err(kbdev->dev, "Fault address reported in protected mode\n");
+ }
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+ const int num_as = 16;
+ const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
+ const int pf_shift = 0;
+ const unsigned long as_bit_mask = (1UL << num_as) - 1;
+ unsigned long flags;
+ u32 new_mask;
+ u32 tmp;
+
+ /* bus faults */
+ u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+ /* page faults (note: Ignore ASes with both pf and bf) */
+ u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ /* remember current mask */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ /* mask interrupts for now */
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+ while (bf_bits | pf_bits) {
+ struct kbase_as *as;
+ int as_no;
+ struct kbase_context *kctx;
+
+ /*
+ * the while logic ensures we have a bit set, no need to check
+ * for not-found here
+ */
+ as_no = ffs(bf_bits | pf_bits) - 1;
+ as = &kbdev->as[as_no];
+
+ /*
+ * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+ * Bus/Page faults _should_ only occur whilst jobs are running,
+ * and a job causing the Bus/Page fault shouldn't complete until
+ * the MMU is updated
+ */
+ kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+
+ /* find faulting address */
+ as->fault_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_HI),
+ kctx);
+ as->fault_addr <<= 32;
+ as->fault_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_LO),
+ kctx);
+
+ /* Mark the fault protected or not */
+ as->protected_mode = kbdev->protected_mode;
+
+ if (kbdev->protected_mode && as->fault_addr)
+ {
+ /* check if address reporting is allowed */
+ validate_protected_page_fault(kbdev, kctx);
+ }
+
+ /* report the fault to debugfs */
+ kbase_as_fault_debugfs_new(kbdev, as_no);
+
+ /* record the fault status */
+ as->fault_status = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTSTATUS),
+ kctx);
+
+ /* find the fault type */
+ as->fault_type = (bf_bits & (1 << as_no)) ?
+ KBASE_MMU_FAULT_TYPE_BUS :
+ KBASE_MMU_FAULT_TYPE_PAGE;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+ as->fault_extra_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
+ kctx);
+ as->fault_extra_addr <<= 32;
+ as->fault_extra_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
+ kctx);
+ }
+
+ if (kbase_as_has_bus_fault(as)) {
+ /* Mark bus fault as handled.
+ * Note that a bus fault is processed first in case
+ * where both a bus fault and page fault occur.
+ */
+ bf_bits &= ~(1UL << as_no);
+
+ /* remove the queued BF (and PF) from the mask */
+ new_mask &= ~(MMU_BUS_ERROR(as_no) |
+ MMU_PAGE_FAULT(as_no));
+ } else {
+ /* Mark page fault as handled */
+ pf_bits &= ~(1UL << as_no);
+
+ /* remove the queued PF from the mask */
+ new_mask &= ~MMU_PAGE_FAULT(as_no);
+ }
+
+ /* Process the interrupt for this address space */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_interrupt_process(kbdev, kctx, as);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
+ /* reenable interrupts */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ new_mask |= tmp;
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx)
+{
+ struct kbase_mmu_setup *current_setup = &as->current_setup;
+ u32 transcfg = 0;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+ transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
+
+ /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+ /* Clear PTW_MEMATTR bits */
+ transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
+ /* Enable correct PTW_MEMATTR bits */
+ transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
+ /* Clear PTW_SH bits */
+ transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
+ /* Enable correct PTW_SH bits */
+ transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+ }
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
+ transcfg, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+ (current_setup->transcfg >> 32) & 0xFFFFFFFFUL,
+ kctx);
+ } else {
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+ }
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+ current_setup->transtab & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+ (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+ current_setup->memattr & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+ (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
+
+ KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as,
+ current_setup->transtab,
+ current_setup->memattr,
+ transcfg);
+
+ write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
+}
+
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
+ unsigned int handling_irq)
+{
+ int ret;
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ if (op == AS_COMMAND_UNLOCK) {
+ /* Unlock doesn't require a lock first */
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ } else {
+ u64 lock_addr = lock_region(kbdev, vpfn, nr);
+
+ /* Lock the region that needs to be updated */
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+ lock_addr & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+ (lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
+
+ /* Run the MMU operation */
+ write_cmd(kbdev, as->number, op, kctx);
+
+ /* Wait for the flush to complete */
+ ret = wait_ready(kbdev, as->number, kctx);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
+ /* Issue an UNLOCK command to ensure that valid page
+ tables are re-read by the GPU after an update.
+ The FLUSH command alone should perform all the
+ necessary actions; however, bus logs show that if
+ multiple page faults occur within an 8-page region
+ the MMU does not always re-read the updated page
+ table entries for later faults, or re-reads them
+ only partially, and it subsequently raises the page
+ fault IRQ for the same addresses. The UNLOCK ensures
+ that the MMU cache is flushed, so the updates can be
+ re-read. As the region is now unlocked we need to
+ issue 2 UNLOCK commands in order to flush the
+ MMU/uTLB, see PRLAM-8812.
+ */
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ }
+ }
+
+ return ret;
+}
+
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+ unsigned long flags;
+ u32 pf_bf_mask;
+
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+ /*
+ * A reset is in-flight and we're flushing the IRQ + bottom half
+ * so don't update anything as it could race with the reset code.
+ */
+ if (kbdev->irq_reset_flush)
+ goto unlock;
+
+ /* Clear the page (and bus fault IRQ as well in case one occurred) */
+ pf_bf_mask = MMU_PAGE_FAULT(as->number);
+ if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+ type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ pf_bf_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);
+
+unlock:
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+ unsigned long flags;
+ u32 irq_mask;
+
+ /* Enable the page fault IRQ (and bus fault IRQ as well in case one
+ * occurred) */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+ /*
+ * A reset is in-flight and we're flushing the IRQ + bottom half
+ * so don't update anything as it could race with the reset code.
+ */
+ if (kbdev->irq_reset_flush)
+ goto unlock;
+
+ irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
+ MMU_PAGE_FAULT(as->number);
+
+ if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+ type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ irq_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);
+
+unlock:
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.h
new file mode 100644
index 000000000000..c02253c6acc3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Interface file for the direct implementation of MMU hardware access
+ *
+ * Direct MMU hardware interface
+ *
+ * This module provides the interface(s) that are required by the direct
+ * register access implementation of the MMU hardware interface
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
+#define _MALI_KBASE_MMU_HW_DIRECT_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * kbase_mmu_interrupt - Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
+ *
+ * @kbdev: kbase device on which the interrupt occurred
+ * @irq_stat: Value of the MMU_IRQ_STATUS register
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.c
new file mode 100644
index 000000000000..0614348e935a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.c
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 always_on_get_core_mask(struct kbase_device *kbdev)
+{
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static bool always_on_get_core_active(struct kbase_device *kbdev)
+{
+ return true;
+}
+
+static void always_on_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void always_on_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the "always on" power policy.
+ *
+ * This is the static structure that defines the "always on" power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
+ "always_on", /* name */
+ always_on_init, /* init */
+ always_on_term, /* term */
+ always_on_get_core_mask, /* get_core_mask */
+ always_on_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.h
new file mode 100644
index 000000000000..f9d244b01bc2
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_always_on.h
@@ -0,0 +1,77 @@
+
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_ALWAYS_ON_H
+#define MALI_KBASE_PM_ALWAYS_ON_H
+
+/**
+ * DOC:
+ * The "Always on" power management policy has the following
+ * characteristics:
+ *
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * All Shader Cores are powered up, regardless of whether or not they will
+ * be needed later.
+ *
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * All Shader Cores are kept powered, regardless of whether or not they will
+ * be needed.
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ * The Shader Cores are kept powered, regardless of whether or not they will
+ * be needed. The GPU itself is also kept powered, even though it is not
+ * needed.
+ *
+ * This policy is automatically overridden during system suspend: the desired
+ * core state is ignored, and the cores are forced off regardless of what the
+ * policy requests. After resuming from suspend, new changes to the desired
+ * core state made by the policy are honored.
+ *
+ * Note:
+ *
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_always_on - Private struct for policy instance data
+ * @dummy: unused dummy variable
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_policy_always_on {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;
+
+#endif /* MALI_KBASE_PM_ALWAYS_ON_H */
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_backend.c
new file mode 100644
index 000000000000..c88b80a325dd
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_backend.c
@@ -0,0 +1,478 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * GPU backend implementation of base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <mali_kbase_pm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+
+void kbase_pm_register_access_enable(struct kbase_device *kbdev)
+{
+ struct kbase_pm_callback_conf *callbacks;
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+ if (callbacks)
+ callbacks->power_on_callback(kbdev);
+
+ kbdev->pm.backend.gpu_powered = true;
+}
+
+void kbase_pm_register_access_disable(struct kbase_device *kbdev)
+{
+ struct kbase_pm_callback_conf *callbacks;
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+ if (callbacks)
+ callbacks->power_off_callback(kbdev);
+
+ kbdev->pm.backend.gpu_powered = false;
+}
+
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
+{
+ int ret = 0;
+ struct kbase_pm_callback_conf *callbacks;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_init(&kbdev->pm.lock);
+
+ kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
+ kbase_pm_gpu_poweroff_wait_wq);
+
+ kbdev->pm.backend.gpu_powered = false;
+ kbdev->pm.suspending = false;
+#ifdef CONFIG_MALI_DEBUG
+ kbdev->pm.backend.driver_ready_for_irqs = false;
+#endif /* CONFIG_MALI_DEBUG */
+ kbdev->pm.backend.gpu_in_desired_state = true;
+ init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+ if (callbacks) {
+ kbdev->pm.backend.callback_power_on =
+ callbacks->power_on_callback;
+ kbdev->pm.backend.callback_power_off =
+ callbacks->power_off_callback;
+ kbdev->pm.backend.callback_power_suspend =
+ callbacks->power_suspend_callback;
+ kbdev->pm.backend.callback_power_resume =
+ callbacks->power_resume_callback;
+ kbdev->pm.callback_power_runtime_init =
+ callbacks->power_runtime_init_callback;
+ kbdev->pm.callback_power_runtime_term =
+ callbacks->power_runtime_term_callback;
+ kbdev->pm.backend.callback_power_runtime_on =
+ callbacks->power_runtime_on_callback;
+ kbdev->pm.backend.callback_power_runtime_off =
+ callbacks->power_runtime_off_callback;
+ kbdev->pm.backend.callback_power_runtime_idle =
+ callbacks->power_runtime_idle_callback;
+ } else {
+ kbdev->pm.backend.callback_power_on = NULL;
+ kbdev->pm.backend.callback_power_off = NULL;
+ kbdev->pm.backend.callback_power_suspend = NULL;
+ kbdev->pm.backend.callback_power_resume = NULL;
+ kbdev->pm.callback_power_runtime_init = NULL;
+ kbdev->pm.callback_power_runtime_term = NULL;
+ kbdev->pm.backend.callback_power_runtime_on = NULL;
+ kbdev->pm.backend.callback_power_runtime_off = NULL;
+ kbdev->pm.backend.callback_power_runtime_idle = NULL;
+ }
+
+ /* Initialise the metrics subsystem */
+ ret = kbasep_pm_metrics_init(kbdev);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
+ kbdev->pm.backend.l2_powered = 0;
+
+ init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
+ kbdev->pm.backend.reset_done = false;
+
+ init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
+ kbdev->pm.active_count = 0;
+
+ spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
+ spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);
+
+ init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
+ if (kbase_pm_ca_init(kbdev) != 0)
+ goto workq_fail;
+
+ if (kbase_pm_policy_init(kbdev) != 0)
+ goto pm_policy_fail;
+
+ return 0;
+
+pm_policy_fail:
+ kbase_pm_ca_term(kbdev);
+workq_fail:
+ kbasep_pm_metrics_term(kbdev);
+ return -EINVAL;
+}
+
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+{
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* Turn clocks and interrupts on - no-op if we haven't done a previous
+ * kbase_pm_clock_off() */
+ kbase_pm_clock_on(kbdev, is_resume);
+
+ /* Update core status as required by the policy */
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
+ kbase_pm_update_cores_state(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);
+
+ /* NOTE: We don't wait to reach the desired state, since running atoms
+ * will wait for that state to be reached anyway */
+}
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+{
+ struct kbase_device *kbdev = container_of(data, struct kbase_device,
+ pm.backend.gpu_poweroff_wait_work);
+ struct kbase_pm_device_data *pm = &kbdev->pm;
+ struct kbase_pm_backend_data *backend = &pm->backend;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+
+#if !PLATFORM_POWER_DOWN_ONLY
+ /* Wait for power transitions to complete. We do this with no locks held
+ * so that we don't deadlock with any pending workqueues */
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
+ kbase_pm_check_transitions_sync(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
+#endif /* !PLATFORM_POWER_DOWN_ONLY */
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+#if PLATFORM_POWER_DOWN_ONLY
+ if (kbdev->pm.backend.gpu_powered) {
+ if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2)) {
+ /* If L2 cache is powered then we must flush it before
+ * we power off the GPU. Normally this would have been
+ * handled when the L2 was powered off. */
+ kbase_gpu_cacheclean(kbdev);
+ }
+ }
+#endif /* PLATFORM_POWER_DOWN_ONLY */
+
+ if (!backend->poweron_required) {
+#if !PLATFORM_POWER_DOWN_ONLY
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ WARN_ON(kbdev->l2_available_bitmap ||
+ kbdev->shader_available_bitmap ||
+ kbdev->tiler_available_bitmap);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+#endif /* !PLATFORM_POWER_DOWN_ONLY */
+
+ /* Consume any change-state events */
+ kbase_timeline_pm_check_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+ /* Disable interrupts and turn the clock off */
+ if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
+ /*
+ * Page/bus faults are pending, must drop locks to
+ * process. Interrupts are disabled so no more faults
+ * should be generated at this point.
+ */
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ kbase_flush_mmu_wqs(kbdev);
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Turn off the clock now that the faults have been handled. We
+ * dropped the locks, so poweron_required may have changed -
+ * power back on if this is the case. */
+ if (backend->poweron_required)
+ kbase_pm_clock_on(kbdev, false);
+ else
+ WARN_ON(!kbase_pm_clock_off(kbdev,
+ backend->poweroff_is_suspend));
+ }
+ }
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->poweroff_wait_in_progress = false;
+ if (backend->poweron_required) {
+ backend->poweron_required = false;
+ kbase_pm_update_cores_state_nolock(kbdev);
+ kbase_backend_slot_update(kbdev);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ wake_up(&kbdev->pm.backend.poweroff_wait);
+}
+
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (!kbdev->pm.backend.poweroff_wait_in_progress) {
+ /* Force all cores off */
+ kbdev->pm.backend.desired_shader_state = 0;
+ kbdev->pm.backend.desired_tiler_state = 0;
+
+ /* Force all cores to be unavailable, in the situation where
+ * transitions are in progress for some cores but not others,
+ * and kbase_pm_check_transitions_nolock can not immediately
+ * power off the cores */
+ kbdev->shader_available_bitmap = 0;
+ kbdev->tiler_available_bitmap = 0;
+ kbdev->l2_available_bitmap = 0;
+
+ kbdev->pm.backend.poweroff_wait_in_progress = true;
+ kbdev->pm.backend.poweroff_is_suspend = is_suspend;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ /* Kick off the workqueue here. Callers will have to wait */
+ queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
+ &kbdev->pm.backend.gpu_poweroff_wait_work);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+}
+
+static bool is_poweroff_in_progress(struct kbase_device *kbdev)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+{
+ wait_event_killable(kbdev->pm.backend.poweroff_wait,
+ is_poweroff_in_progress(kbdev));
+}
+
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ unsigned int flags)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long irq_flags;
+ int ret;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* A suspend won't happen during startup/insmod */
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+ /* Power up the GPU, don't enable IRQs as we are not ready to receive
+ * them. */
+ ret = kbase_pm_init_hw(kbdev, flags);
+ if (ret) {
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ return ret;
+ }
+
+ kbasep_pm_init_core_use_bitmaps(kbdev);
+
+ kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+ kbdev->pm.debug_core_mask[1] =
+ kbdev->pm.debug_core_mask[2] =
+ kbdev->gpu_props.props.raw_props.shader_present;
+
+ /* Pretend the GPU is active to prevent a power policy turning the GPU
+ * cores off */
+ kbdev->pm.active_count = 1;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+ /* Ensure cycle counter is off */
+ kbdev->pm.backend.gpu_cycle_counter_requests = 0;
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+
+ /* We are now ready to receive IRQs, as the power policy is set up,
+ * so enable them. */
+#ifdef CONFIG_MALI_DEBUG
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+ kbdev->pm.backend.driver_ready_for_irqs = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+#endif
+ kbase_pm_enable_interrupts(kbdev);
+
+ /* Turn on the GPU and any cores needed by the policy */
+ kbase_pm_do_poweron(kbdev, false);
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Idle the GPU and/or cores, if the policy wants it to */
+ kbase_pm_context_idle(kbdev);
+
+ return 0;
+}
+
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_lock(&kbdev->pm.lock);
+ kbase_pm_cancel_deferred_poweroff(kbdev);
+ kbase_pm_do_poweroff(kbdev, false);
+ mutex_unlock(&kbdev->pm.lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
+
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);
+
+ /* Free any resources the policy allocated */
+ kbase_pm_policy_term(kbdev);
+ kbase_pm_ca_term(kbdev);
+
+ /* Shut down the metrics subsystem */
+ kbasep_pm_metrics_term(kbdev);
+
+ destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
+}
+
+void kbase_pm_power_changed(struct kbase_device *kbdev)
+{
+ bool cores_are_available;
+ unsigned long flags;
+
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);
+
+ if (cores_are_available) {
+ /* Log timelining information that a change in state has
+ * completed */
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+ kbase_backend_slot_update(kbdev);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ u64 new_core_mask_js0, u64 new_core_mask_js1,
+ u64 new_core_mask_js2)
+{
+ kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
+ kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
+ kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
+ kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
+ new_core_mask_js2;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
+{
+ kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
+{
+ kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ /* Force power off the GPU and all cores (regardless of policy), only
+ * after the PM active count reaches zero (otherwise, we risk turning it
+ * off prematurely) */
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ kbase_pm_cancel_deferred_poweroff(kbdev);
+ kbase_pm_do_poweroff(kbdev, true);
+
+ kbase_backend_timer_suspend(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ kbase_pm_wait_for_poweroff_complete(kbdev);
+}
+
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ kbdev->pm.suspending = false;
+ kbase_pm_do_poweron(kbdev, true);
+
+ kbase_backend_timer_resume(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.c
new file mode 100644
index 000000000000..85890f1e85f5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.c
@@ -0,0 +1,182 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_ca_policy *const policy_list[] = {
+ &kbase_pm_ca_fixed_policy_ops,
+#ifdef CONFIG_MALI_DEVFREQ
+ &kbase_pm_ca_devfreq_policy_ops,
+#endif
+#if !MALI_CUSTOMER_RELEASE
+ &kbase_pm_ca_random_policy_ops
+#endif
+};
+
+/**
+ * POLICY_COUNT - The number of policies available in the system.
+ *
+ * This is derived from the number of policies listed in policy_list.
+ */
+#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
+
+int kbase_pm_ca_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ kbdev->pm.backend.ca_current_policy = policy_list[0];
+
+ kbdev->pm.backend.ca_current_policy->init(kbdev);
+
+ return 0;
+}
+
+void kbase_pm_ca_term(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.ca_current_policy->term(kbdev);
+}
+
+int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
+{
+ if (!list)
+ return POLICY_COUNT;
+
+ *list = policy_list;
+
+ return POLICY_COUNT;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies);
+
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return kbdev->pm.backend.ca_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy);
+
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_ca_policy *new_policy)
+{
+ const struct kbase_pm_ca_policy *old_policy;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+ KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
+ new_policy->id);
+
+ /* During a policy change we pretend the GPU is active */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread */
+ kbase_pm_context_active(kbdev);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Remove the policy to prevent IRQ handlers from working on it */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ old_policy = kbdev->pm.backend.ca_current_policy;
+ kbdev->pm.backend.ca_current_policy = NULL;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (old_policy->term)
+ old_policy->term(kbdev);
+
+ if (new_policy->init)
+ new_policy->init(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.ca_current_policy = new_policy;
+
+ /* If any core power state changes were previously attempted, but
+ * couldn't be made because the policy was changing (current_policy was
+ * NULL), then re-try them here. */
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
+ kbdev->shader_ready_bitmap,
+ kbdev->shader_transitioning_bitmap);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Now the policy change is finished, we release our fake context active
+ * reference */
+ kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy);
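+
+/*
+ * Illustrative usage sketch: a caller (e.g. a sysfs or debugfs handler) can
+ * select a CA policy by name using only the helpers above. The wrapper name
+ * example_select_ca_policy() is hypothetical.
+ *
+ *	static int example_select_ca_policy(struct kbase_device *kbdev,
+ *			const char *name)
+ *	{
+ *		const struct kbase_pm_ca_policy *const *list;
+ *		int count = kbase_pm_ca_list_policies(&list);
+ *		int i;
+ *
+ *		for (i = 0; i < count; i++) {
+ *			if (!strcmp(list[i]->name, name)) {
+ *				kbase_pm_ca_set_policy(kbdev, list[i]);
+ *				return 0;
+ *			}
+ *		}
+ *		return -EINVAL;
+ *	}
+ */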
+
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* All cores must be enabled when instrumentation is in use */
+ if (kbdev->pm.backend.instr_enabled)
+ return kbdev->gpu_props.props.raw_props.shader_present &
+ kbdev->pm.debug_core_mask_all;
+
+ if (kbdev->pm.backend.ca_current_policy == NULL)
+ return kbdev->gpu_props.props.raw_props.shader_present &
+ kbdev->pm.debug_core_mask_all;
+
+ return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
+ kbdev->pm.debug_core_mask_all;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
+
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->pm.backend.ca_current_policy != NULL)
+ kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
+ cores_ready,
+ cores_transitioning);
+}
+
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.instr_enabled = true;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ kbdev->pm.backend.instr_enabled = false;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+}
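+
+/*
+ * Illustrative usage sketch for the two overrides above. Note the asymmetry:
+ * kbase_pm_ca_instr_enable() takes hwaccess_lock itself, while
+ * kbase_pm_ca_instr_disable() expects the caller to already hold it.
+ *
+ *	unsigned long flags;
+ *
+ *	kbase_pm_ca_instr_enable(kbdev);
+ *	... perform the counter dump with all shader cores available ...
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	kbase_pm_ca_instr_disable(kbdev);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */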
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.h
new file mode 100644
index 000000000000..ee9e751f2d79
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#ifndef _KBASE_PM_CA_H_
+#define _KBASE_PM_CA_H_
+
+/**
+ * kbase_pm_ca_init - Initialize core availability framework
+ *
+ * Must be called before calling any other core availability function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the core availability framework was successfully initialized,
+ * -errno otherwise
+ */
+int kbase_pm_ca_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_term - Terminate core availability framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_ca_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_get_core_mask - Get currently available shader core mask
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Returns a mask of the currently available shader cores.
+ * Calls into the core availability policy
+ *
+ * Return: The bit mask of available cores
+ */
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_update_core_status - Update core status
+ *
+ * @kbdev: The kbase device structure for the device (must be
+ * a valid pointer)
+ * @cores_ready: The bit mask of cores ready for job submission
+ * @cores_transitioning: The bit mask of cores that are transitioning power
+ * state
+ *
+ * Update core availability policy with current core power status
+ *
+ * Calls into the core availability policy
+ */
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning);
+
+/**
+ * kbase_pm_ca_instr_enable - Enable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This overrides the output of the core availability policy, ensuring that all
+ * cores are available
+ */
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_instr_disable - Disable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This disables any previously enabled override, and resumes normal policy
+ * functionality
+ */
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev);
+
+#endif /* _KBASE_PM_CA_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.c
new file mode 100644
index 000000000000..66bf660cffb6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.c
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A core availability policy implementing core mask selection from devfreq OPPs
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <linux/version.h>
+
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
+{
+ struct kbasep_pm_ca_policy_devfreq *data =
+ &kbdev->pm.backend.ca_policy_data.devfreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ data->cores_desired = core_mask;
+
+ /* Disable any cores that are now unwanted */
+ data->cores_enabled &= data->cores_desired;
+
+ kbdev->pm.backend.ca_in_transition = true;
+
+ /* If there are no cores to be powered off then power on desired cores
+ */
+ if (!(data->cores_used & ~data->cores_desired)) {
+ data->cores_enabled = data->cores_desired;
+ kbdev->pm.backend.ca_in_transition = false;
+ }
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX %llX\n",
+ data->cores_desired, data->cores_enabled);
+}
+
+static void devfreq_init(struct kbase_device *kbdev)
+{
+ struct kbasep_pm_ca_policy_devfreq *data =
+ &kbdev->pm.backend.ca_policy_data.devfreq;
+
+ if (kbdev->current_core_mask) {
+ data->cores_enabled = kbdev->current_core_mask;
+ data->cores_desired = kbdev->current_core_mask;
+ } else {
+ data->cores_enabled =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ data->cores_desired =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ }
+ data->cores_used = 0;
+ kbdev->pm.backend.ca_in_transition = false;
+}
+
+static void devfreq_term(struct kbase_device *kbdev)
+{
+}
+
+static u64 devfreq_get_core_mask(struct kbase_device *kbdev)
+{
+ return kbdev->pm.backend.ca_policy_data.devfreq.cores_enabled;
+}
+
+static void devfreq_update_core_status(struct kbase_device *kbdev,
+ u64 cores_ready,
+ u64 cores_transitioning)
+{
+ struct kbasep_pm_ca_policy_devfreq *data =
+ &kbdev->pm.backend.ca_policy_data.devfreq;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ data->cores_used = cores_ready | cores_transitioning;
+
+ /* If in desired state then clear transition flag */
+ if (data->cores_enabled == data->cores_desired)
+ kbdev->pm.backend.ca_in_transition = false;
+
+ /* If all undesired cores are now off then power on desired cores.
+ * The direct comparison against cores_enabled limits potential
+ * recursion to one level */
+ if (!(data->cores_used & ~data->cores_desired) &&
+ data->cores_enabled != data->cores_desired) {
+ data->cores_enabled = data->cores_desired;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ kbdev->pm.backend.ca_in_transition = false;
+ }
+}
+
+/*
+ * The struct kbase_pm_ca_policy structure for the devfreq core availability
+ * policy.
+ *
+ * This is the static structure that defines the devfreq core availability power
+ * policy's callback and name.
+ */
+const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops = {
+ "devfreq", /* name */
+ devfreq_init, /* init */
+ devfreq_term, /* term */
+ devfreq_get_core_mask, /* get_core_mask */
+ devfreq_update_core_status, /* update_core_status */
+ 0u, /* flags */
+ KBASE_PM_CA_POLICY_ID_DEVFREQ, /* id */
+};
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.h
new file mode 100644
index 000000000000..7ab3cd4d8460
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_devfreq.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A core availability policy for use with devfreq, where core masks are
+ * associated with OPPs.
+ */
+
+#ifndef MALI_KBASE_PM_CA_DEVFREQ_H
+#define MALI_KBASE_PM_CA_DEVFREQ_H
+
+/**
+ * struct kbasep_pm_ca_policy_devfreq - Private structure for devfreq ca policy
+ *
+ * This contains data that is private to the devfreq core availability
+ * policy.
+ *
+ * @cores_desired: Cores that the policy wants to be available
+ * @cores_enabled: Cores that the policy is currently returning as available
+ * @cores_used: Cores currently powered or transitioning
+ */
+struct kbasep_pm_ca_policy_devfreq {
+ u64 cores_desired;
+ u64 cores_enabled;
+ u64 cores_used;
+};
+
+extern const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops;
+
+/**
+ * kbase_devfreq_set_core_mask - Set core mask for policy to use
+ * @kbdev: Device pointer
+ * @core_mask: New core mask
+ *
+ * The new core mask will have immediate effect if the GPU is powered, or will
+ * take effect when it is next powered on.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
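+
+/*
+ * Illustrative usage sketch: a devfreq integration would typically keep a
+ * table pairing each OPP frequency with the shader core mask it was
+ * characterised for, and apply the mask whenever the operating point
+ * changes. The example_opp structure and helper below are hypothetical;
+ * only kbase_devfreq_set_core_mask() is defined by this header.
+ *
+ *	struct example_opp {
+ *		unsigned long freq;
+ *		u64 core_mask;
+ *	};
+ *
+ *	static void example_apply_opp(struct kbase_device *kbdev,
+ *			const struct example_opp *opp)
+ *	{
+ *		kbase_devfreq_set_core_mask(kbdev, opp->core_mask);
+ *	}
+ */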
+
+#endif /* MALI_KBASE_PM_CA_DEVFREQ_H */
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.c
new file mode 100644
index 000000000000..864612d31f9b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.c
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A power policy implementing fixed core availability
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static void fixed_init(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.ca_in_transition = false;
+}
+
+static void fixed_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static u64 fixed_get_core_mask(struct kbase_device *kbdev)
+{
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static void fixed_update_core_status(struct kbase_device *kbdev,
+ u64 cores_ready,
+ u64 cores_transitioning)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(cores_ready);
+ CSTD_UNUSED(cores_transitioning);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the fixed power policy.
+ *
+ * This is the static structure that defines the fixed power policy's callback
+ * and name.
+ */
+const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
+ "fixed", /* name */
+ fixed_init, /* init */
+ fixed_term, /* term */
+ fixed_get_core_mask, /* get_core_mask */
+ fixed_update_core_status, /* update_core_status */
+ 0u, /* flags */
+ KBASE_PM_CA_POLICY_ID_FIXED, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops);
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.h
new file mode 100644
index 000000000000..a763155cb703
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_ca_fixed.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A power policy implementing fixed core availability
+ */
+
+#ifndef MALI_KBASE_PM_CA_FIXED_H
+#define MALI_KBASE_PM_CA_FIXED_H
+
+/**
+ * struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data
+ *
+ * @dummy: Dummy member - no state is needed
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_ca_policy_fixed {
+ int dummy;
+};
+
+extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
+
+#endif /* MALI_KBASE_PM_CA_FIXED_H */
+
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.c
new file mode 100644
index 000000000000..f891fa225a89
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -0,0 +1,70 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
+{
+ if (kbdev->pm.active_count == 0)
+ return 0;
+
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
+{
+ if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
+ kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt)
+ return false;
+
+ return true;
+}
+
+static void coarse_demand_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void coarse_demand_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/* The struct kbase_pm_policy structure for the coarse demand power policy.
+ *
+ * This is the static structure that defines the coarse demand power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+ "coarse_demand", /* name */
+ coarse_demand_init, /* init */
+ coarse_demand_term, /* term */
+ coarse_demand_get_core_mask, /* get_core_mask */
+ coarse_demand_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.h
new file mode 100644
index 000000000000..749d305eee9a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_coarse_demand.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_COARSE_DEMAND_H
+#define MALI_KBASE_PM_COARSE_DEMAND_H
+
+/**
+ * DOC:
+ * The "Coarse" demand power management policy has the following
+ * characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * - All Shader Cores are powered up, regardless of whether or not they will
+ * be needed later.
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * - All Shader Cores are kept powered, regardless of whether or not they will
+ * be needed
+ * - When KBase indicates that the GPU need not be powered:
+ * - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * @note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand
+ * policy
+ *
+ * This contains data that is private to the coarse demand power policy.
+ *
+ * @dummy: Dummy member - no state needed
+ */
+struct kbasep_pm_policy_coarse_demand {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_defs.h
new file mode 100644
index 000000000000..352744ee6d73
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_defs.h
@@ -0,0 +1,519 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend-specific Power Manager definitions
+ */
+
+#ifndef _KBASE_PM_HWACCESS_DEFS_H_
+#define _KBASE_PM_HWACCESS_DEFS_H_
+
+#include "mali_kbase_pm_ca_fixed.h"
+#include "mali_kbase_pm_ca_devfreq.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_ca_random.h"
+#endif
+
+#include "mali_kbase_pm_always_on.h"
+#include "mali_kbase_pm_coarse_demand.h"
+#include "mali_kbase_pm_demand.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_demand_always_powered.h"
+#include "mali_kbase_pm_fast_start.h"
+#endif
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+struct kbase_jd_atom;
+
+/**
+ * enum kbase_pm_core_type - The types of core in a GPU.
+ *
+ * These enumerated values are used in calls to
+ * - kbase_pm_get_present_cores()
+ * - kbase_pm_get_active_cores()
+ * - kbase_pm_get_trans_cores()
+ * - kbase_pm_get_ready_cores().
+ *
+ * They specify which type of core should be acted on. These values are set in
+ * a manner that allows the core_type_to_reg() function to be simpler and more
+ * efficient.
+ *
+ * @KBASE_PM_CORE_L2: The L2 cache
+ * @KBASE_PM_CORE_SHADER: Shader cores
+ * @KBASE_PM_CORE_TILER: Tiler cores
+ * @KBASE_PM_CORE_STACK: Core stacks
+ */
+enum kbase_pm_core_type {
+ KBASE_PM_CORE_L2 = L2_PRESENT_LO,
+ KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,
+ KBASE_PM_CORE_TILER = TILER_PRESENT_LO,
+ KBASE_PM_CORE_STACK = STACK_PRESENT_LO
+};
+
+/**
+ * struct kbasep_pm_metrics_data - Metrics data collected for use by the power
+ * management framework.
+ *
+ * @time_period_start: time at which busy/idle measurements started
+ * @time_busy: number of ns the GPU was busy executing jobs since the
+ * @time_period_start timestamp.
+ * @time_idle: number of ns the GPU was not executing jobs since the
+ * @time_period_start timestamp.
+ * @prev_busy: busy time in ns of previous time period.
+ * Updated when metrics are reset.
+ * @prev_idle: idle time in ns of previous time period
+ * Updated when metrics are reset.
+ * @gpu_active: true when the GPU is executing jobs, false when it is not.
+ * Updated when the job scheduler informs us a job is submitted
+ * to or removed from a GPU slot.
+ * @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
+ * if two CL jobs were active for 400ns, this value would be updated
+ * with 800.
+ * @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
+ * if two GL jobs were active for 400ns, this value would be updated
+ * with 800.
+ * @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
+ * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
+ * GL jobs never run on slot 2 this slot is not recorded.
+ * @lock: spinlock protecting the kbasep_pm_metrics_data structure
+ * @timer: timer to regularly make DVFS decisions based on the power
+ * management metrics.
+ * @timer_active: boolean indicating @timer is running
+ * @platform_data: pointer to data controlled by platform specific code
+ * @kbdev: pointer to kbase device for which metrics are collected
+ *
+ */
+struct kbasep_pm_metrics_data {
+ ktime_t time_period_start;
+ u32 time_busy;
+ u32 time_idle;
+ u32 prev_busy;
+ u32 prev_idle;
+ bool gpu_active;
+ u32 busy_cl[2];
+ u32 busy_gl;
+ u32 active_cl_ctx[2];
+ u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
+ spinlock_t lock;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ struct hrtimer timer;
+ bool timer_active;
+#endif
+
+ void *platform_data;
+ struct kbase_device *kbdev;
+};
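+
+/*
+ * Illustrative sketch: DVFS code would typically reduce these counters to a
+ * busy fraction over the last sampling period, roughly as below. The helper
+ * name is hypothetical.
+ *
+ *	static u32 example_busy_permille(const struct kbasep_pm_metrics_data *m)
+ *	{
+ *		u32 total = m->time_busy + m->time_idle;
+ *
+ *		return total ? (u32)div_u64((u64)m->time_busy * 1000, total) : 0;
+ *	}
+ */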
+
+union kbase_pm_policy_data {
+ struct kbasep_pm_policy_always_on always_on;
+ struct kbasep_pm_policy_coarse_demand coarse_demand;
+ struct kbasep_pm_policy_demand demand;
+#if !MALI_CUSTOMER_RELEASE
+ struct kbasep_pm_policy_demand_always_powered demand_always_powered;
+ struct kbasep_pm_policy_fast_start fast_start;
+#endif
+};
+
+union kbase_pm_ca_policy_data {
+ struct kbasep_pm_ca_policy_fixed fixed;
+ struct kbasep_pm_ca_policy_devfreq devfreq;
+#if !MALI_CUSTOMER_RELEASE
+ struct kbasep_pm_ca_policy_random random;
+#endif
+};
+
+/**
+ * struct kbase_pm_backend_data - Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ *
+ * @ca_current_policy: The policy that is currently actively controlling core
+ * availability.
+ * @pm_current_policy: The policy that is currently actively controlling the
+ * power state.
+ * @ca_policy_data: Private data for current CA policy
+ * @pm_policy_data: Private data for current PM policy
+ * @ca_in_transition: Flag indicating when core availability policy is
+ * transitioning cores. The core availability policy must
+ * set this when a change in core availability is occurring.
+ * power_change_lock must be held when accessing this.
+ * @reset_done: Flag when a reset is complete
+ * @reset_done_wait: Wait queue to wait for changes to @reset_done
+ * @l2_powered_wait: Wait queue for whether the l2 cache has been powered as
+ * requested
+ * @l2_powered: State indicating whether all the l2 caches are powered.
+ * Non-zero indicates they're *all* powered
+ * Zero indicates that some (or all) are not powered
+ * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
+ * users
+ * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests
+ * @desired_shader_state: A bit mask identifying the shader cores that the
+ * power policy would like to be on. The current state
+ * of the cores may be different, but there should be
+ * transitions in progress that will eventually achieve
+ * this state (assuming that the policy doesn't change
+ * its mind in the mean time).
+ * @powering_on_shader_state: A bit mask indicating which shader cores are
+ * currently in a power-on transition
+ * @desired_tiler_state: A bit mask identifying the tiler cores that the power
+ * policy would like to be on. See @desired_shader_state
+ * @powering_on_tiler_state: A bit mask indicating which tiler cores are
+ * currently in a power-on transition
+ * @powering_on_l2_state: A bit mask indicating which l2-caches are currently
+ * in a power-on transition
+ * @powering_on_stack_state: A bit mask indicating which core stacks are
+ * currently in a power-on transition
+ * @gpu_in_desired_state: This flag is set if the GPU is powered as requested
+ * by the desired_xxx_state variables
+ * @gpu_in_desired_state_wait: Wait queue set when @gpu_in_desired_state != 0
+ * @gpu_powered: Set to true when the GPU is powered and register
+ * accesses are possible, false otherwise
+ * @instr_enabled: Set to true when instrumentation is enabled,
+ * false otherwise
+ * @cg1_disabled: Set if the policy wants to keep the second core group
+ * powered off
+ * @driver_ready_for_irqs: Debug state indicating whether sufficient
+ * initialization of the driver has occurred to handle
+ * IRQs
+ * @gpu_powered_lock: Spinlock that must be held when writing @gpu_powered or
+ * accessing @driver_ready_for_irqs
+ * @metrics: Structure to hold metrics for the GPU
+ * @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is
+ * powered off
+ * @shader_poweroff_pending_time: number of poweroff timer ticks until shaders
+ * and/or timers are powered off
+ * @gpu_poweroff_timer: Timer for powering off GPU
+ * @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires
+ * @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq
+ * @shader_poweroff_pending: Bit mask of shaders to be powered off on next
+ * timer callback
+ * @tiler_poweroff_pending: Bit mask of tilers to be powered off on next timer
+ * callback
+ * @poweroff_timer_needed: true if the poweroff timer is currently required,
+ * false otherwise
+ * @poweroff_timer_running: true if the poweroff timer is currently running,
+ * false otherwise
+ * power_change_lock should be held when accessing,
+ * unless there is no way the timer can be running (eg
+ * hrtimer_cancel() was called immediately before)
+ * @poweroff_wait_in_progress: true if a wait for GPU power off is in progress.
+ * hwaccess_lock must be held when accessing
+ * @poweron_required: true if a GPU power on is required. Should only be set
+ * when poweroff_wait_in_progress is true, and therefore the
+ * GPU can not immediately be powered on. pm.lock must be
+ * held when accessing
+ * @poweroff_is_suspend: true if the GPU is being powered off due to a suspend
+ * request. pm.lock must be held when accessing
+ * @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off
+ * @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq
+ * @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete
+ * @callback_power_on: Callback when the GPU needs to be turned on. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_off: Callback when the GPU may be turned off. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to
+ * be turned off. See &struct kbase_pm_callback_conf
+ * @callback_power_resume: Callback when a resume occurs and the GPU needs to
+ * be turned on. See &struct kbase_pm_callback_conf
+ * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_runtime_off: Callback when the GPU may be turned off. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
+ * &struct kbase_pm_callback_conf
+ *
+ * Note:
+ * During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the
+ * policy is being changed with kbase_pm_ca_set_policy() or
+ * kbase_pm_set_policy(). The change is protected under
+ * kbase_device.pm.power_change_lock. Direct access to this
+ * from IRQ context must therefore check for NULL. If NULL, then
+ * kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy
+ * functions that would have been done under IRQ.
+ */
+struct kbase_pm_backend_data {
+ const struct kbase_pm_ca_policy *ca_current_policy;
+ const struct kbase_pm_policy *pm_current_policy;
+ union kbase_pm_ca_policy_data ca_policy_data;
+ union kbase_pm_policy_data pm_policy_data;
+ bool ca_in_transition;
+ bool reset_done;
+ wait_queue_head_t reset_done_wait;
+ wait_queue_head_t l2_powered_wait;
+ int l2_powered;
+ int gpu_cycle_counter_requests;
+ spinlock_t gpu_cycle_counter_requests_lock;
+
+ u64 desired_shader_state;
+ u64 powering_on_shader_state;
+ u64 desired_tiler_state;
+ u64 powering_on_tiler_state;
+ u64 powering_on_l2_state;
+#ifdef CONFIG_MALI_CORESTACK
+ u64 powering_on_stack_state;
+#endif /* CONFIG_MALI_CORESTACK */
+
+ bool gpu_in_desired_state;
+ wait_queue_head_t gpu_in_desired_state_wait;
+
+ bool gpu_powered;
+
+ bool instr_enabled;
+
+ bool cg1_disabled;
+
+#ifdef CONFIG_MALI_DEBUG
+ bool driver_ready_for_irqs;
+#endif /* CONFIG_MALI_DEBUG */
+
+ spinlock_t gpu_powered_lock;
+
+
+ struct kbasep_pm_metrics_data metrics;
+
+ int gpu_poweroff_pending;
+ int shader_poweroff_pending_time;
+
+ struct hrtimer gpu_poweroff_timer;
+ struct workqueue_struct *gpu_poweroff_wq;
+ struct work_struct gpu_poweroff_work;
+
+ u64 shader_poweroff_pending;
+ u64 tiler_poweroff_pending;
+
+ bool poweroff_timer_needed;
+ bool poweroff_timer_running;
+
+ bool poweroff_wait_in_progress;
+ bool poweron_required;
+ bool poweroff_is_suspend;
+
+ struct workqueue_struct *gpu_poweroff_wait_wq;
+ struct work_struct gpu_poweroff_wait_work;
+
+ wait_queue_head_t poweroff_wait;
+
+ int (*callback_power_on)(struct kbase_device *kbdev);
+ void (*callback_power_off)(struct kbase_device *kbdev);
+ void (*callback_power_suspend)(struct kbase_device *kbdev);
+ void (*callback_power_resume)(struct kbase_device *kbdev);
+ int (*callback_power_runtime_on)(struct kbase_device *kbdev);
+ void (*callback_power_runtime_off)(struct kbase_device *kbdev);
+ int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+};
+
+
+/* List of policy IDs */
+enum kbase_pm_policy_id {
+ KBASE_PM_POLICY_ID_DEMAND = 1,
+ KBASE_PM_POLICY_ID_ALWAYS_ON,
+ KBASE_PM_POLICY_ID_COARSE_DEMAND,
+#if !MALI_CUSTOMER_RELEASE
+ KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED,
+ KBASE_PM_POLICY_ID_FAST_START
+#endif
+};
+
+typedef u32 kbase_pm_policy_flags;
+
+/**
+ * struct kbase_pm_policy - Power policy structure.
+ *
+ * Each power policy exposes a (static) instance of this structure which
+ * contains function pointers to the policy's methods.
+ *
+ * @name: The name of this policy
+ * @init: Function called when the policy is selected
+ * @term: Function called when the policy is unselected
+ * @get_core_mask: Function called to get the current shader core mask
+ * @get_core_active: Function called to get the current overall GPU power
+ * state
+ * @flags: Field indicating flags for this policy
+ * @id: Field indicating an ID for this policy. This is not
+ * necessarily the same as its index in the list returned
+ * by kbase_pm_list_policies().
+ * It is used purely for debugging.
+ */
+struct kbase_pm_policy {
+ char *name;
+
+ /**
+ * Function called when the policy is selected
+ *
+ * This should initialize the kbdev->pm.pm_policy_data structure. It
+ * should not attempt to make any changes to hardware state.
+ *
+ * It is undefined what state the cores are in when the function is
+ * called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*init)(struct kbase_device *kbdev);
+
+ /**
+ * Function called when the policy is unselected.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*term)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current shader core mask
+ *
+ * The returned mask should meet or exceed (kbdev->shader_needed_bitmap
+ * | kbdev->shader_inuse_bitmap).
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: The mask of shader cores to be powered
+ */
+ u64 (*get_core_mask)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current overall GPU power state
+ *
+ * This function should consider the state of kbdev->pm.active_count. If
+ * this count is greater than 0 then there is at least one active
+ * context on the device and the GPU should be powered. If it is equal
+ * to 0 then there are no active contexts and the GPU could be powered
+ * off if desired.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: true if the GPU should be powered, false otherwise
+ */
+ bool (*get_core_active)(struct kbase_device *kbdev);
+
+ kbase_pm_policy_flags flags;
+ enum kbase_pm_policy_id id;
+};
+
+
+enum kbase_pm_ca_policy_id {
+ KBASE_PM_CA_POLICY_ID_FIXED = 1,
+ KBASE_PM_CA_POLICY_ID_DEVFREQ,
+ KBASE_PM_CA_POLICY_ID_RANDOM
+};
+
+typedef u32 kbase_pm_ca_policy_flags;
+
+/**
+ * Maximum length of a CA policy name
+ */
+#define KBASE_PM_CA_MAX_POLICY_NAME_LEN 15
+
+/**
+ * struct kbase_pm_ca_policy - Core availability policy structure.
+ *
+ * Each core availability policy exposes a (static) instance of this structure
+ * which contains function pointers to the policy's methods.
+ *
+ * @name: The name of this policy
+ * @init: Function called when the policy is selected
+ * @term: Function called when the policy is unselected
+ * @get_core_mask: Function called to get the current shader core
+ * availability mask
+ * @update_core_status: Function called to update the current core status
+ * @flags: Field indicating flags for this policy
+ * @id: Field indicating an ID for this policy. This is not
+ * necessarily the same as its index in the list returned
+ * by kbase_pm_list_policies().
+ * It is used purely for debugging.
+ */
+struct kbase_pm_ca_policy {
+ char name[KBASE_PM_CA_MAX_POLICY_NAME_LEN + 1];
+
+ /**
+ * Function called when the policy is selected
+ *
+ * This should initialize the kbdev->pm.ca_policy_data structure. It
+ * should not attempt to make any changes to hardware state.
+ *
+ * It is undefined what state the cores are in when the function is
+ * called.
+ *
+ * @kbdev The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*init)(struct kbase_device *kbdev);
+
+ /**
+ * Function called when the policy is unselected.
+ *
+ * @kbdev The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*term)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current shader core availability mask
+ *
+ * When a change in core availability is occurring, the policy must set
+ * kbdev->pm.ca_in_transition to true. This is to indicate that
+ * reporting changes in power state cannot be optimized out, even if
+ * kbdev->pm.desired_shader_state remains unchanged. This must be done
+ * by any functions internal to the Core Availability Policy that change
+ * the return value of kbase_pm_ca_policy::get_core_mask.
+ *
+ * @kbdev The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: The current core availability mask
+ */
+ u64 (*get_core_mask)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to update the current core status
+ *
+ * If none of the cores in core group 0 are ready or transitioning, then
+ * the policy must ensure that the next call to get_core_mask does not
+ * return 0 for all cores in core group 0. It is an error to disable
+ * core group 0 through the core availability policy.
+ *
+ * When a change in core availability has finished, the policy must set
+ * kbdev->pm.ca_in_transition to false. This is to indicate that
+ * changes in power state can once again be optimized out when
+ * kbdev->pm.desired_shader_state is unchanged.
+ *
+ * @kbdev: The kbase device structure for the device
+ * (must be a valid pointer)
+ * @cores_ready: The mask of cores currently powered and
+ * ready to run jobs
+ * @cores_transitioning: The mask of cores currently transitioning
+ * power state
+ */
+ void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning);
+
+ kbase_pm_ca_policy_flags flags;
+
+ /**
+ * Field indicating an ID for this policy. This is not necessarily the
+ * same as its index in the list returned by kbase_pm_ca_list_policies().
+ * It is used purely for debugging.
+ */
+ enum kbase_pm_ca_policy_id id;
+};
+
+#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.c
new file mode 100644
index 000000000000..81322fd0dd17
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 demand_get_core_mask(struct kbase_device *kbdev)
+{
+ u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
+
+ if (0 == kbdev->pm.active_count)
+ return 0;
+
+ return desired;
+}
+
+static bool demand_get_core_active(struct kbase_device *kbdev)
+{
+ if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
+ kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt)
+ return false;
+
+ return true;
+}
+
+static void demand_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void demand_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the demand power policy.
+ *
+ * This is the static structure that defines the demand power policy's callback
+ * and name.
+ */
+const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
+ "demand", /* name */
+ demand_init, /* init */
+ demand_term, /* term */
+ demand_get_core_mask, /* get_core_mask */
+ demand_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_DEMAND, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops);
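+
+/*
+ * Contrast with the coarse_demand policy defined earlier: demand powers only
+ * the cores in shader_needed_bitmap | shader_inuse_bitmap, whereas
+ * coarse_demand powers every core in shader_present whenever the GPU is
+ * active. Both policies report the GPU as idle under exactly the same
+ * condition in their get_core_active() callbacks.
+ */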
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.h
new file mode 100644
index 000000000000..c0c84b6e9189
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_demand.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#ifndef MALI_KBASE_PM_DEMAND_H
+#define MALI_KBASE_PM_DEMAND_H
+
+/**
+ * DOC: Demand power management policy
+ *
+ * The demand power management policy has the following characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * - The Shader Cores are not powered up
+ *
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * - Only those Shader Cores are powered up
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ * - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * Note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_demand - Private structure for policy instance data
+ *
+ * @dummy: No state is needed, a dummy variable
+ *
+ * This contains data that is private to the demand power policy.
+ */
+struct kbasep_pm_policy_demand {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_DEMAND_H */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_driver.c
new file mode 100644
index 000000000000..ed19a8a845d6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_driver.c
@@ -0,0 +1,1671 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel Power Management hardware control
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_smc.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+
+#if MALI_MOCK_TEST
+#define MOCKABLE(function) function##_original
+#else
+#define MOCKABLE(function) function
+#endif /* MALI_MOCK_TEST */
+
+/**
+ * enum kbasep_pm_action - Actions that can be performed on a core.
+ *
+ * This enumeration is private to the file. Its values are set to allow the
+ * core_type_to_reg() function, which decodes this enumeration, to be simpler
+ * and more efficient.
+ *
+ * @ACTION_PRESENT: The cores that are present
+ * @ACTION_READY: The cores that are ready
+ * @ACTION_PWRON: Power on the cores specified
+ * @ACTION_PWROFF: Power off the cores specified
+ * @ACTION_PWRTRANS: The cores that are transitioning
+ * @ACTION_PWRACTIVE: The cores that are active
+ */
+enum kbasep_pm_action {
+ ACTION_PRESENT = 0,
+ ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
+ ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
+ ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
+ ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
+ ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
+};
+
+static u64 kbase_pm_get_state(
+ struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action);
+
+/**
+ * core_type_to_reg - Decode a core type and action to a register.
+ *
+ * Given a core type (defined by kbase_pm_core_type) and an action (defined
+ * by kbasep_pm_action) this function will return the register offset that
+ * will perform the action on the core type. The register returned is the _LO
+ * register and an offset must be applied to use the _HI register.
+ *
+ * @core_type: The type of core
+ * @action: The type of action
+ *
+ * Return: The register offset of the _LO register that performs an action of
+ * type @action on a core of type @core_type.
+ */
+static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action)
+{
+#ifdef CONFIG_MALI_CORESTACK
+ if (core_type == KBASE_PM_CORE_STACK) {
+ switch (action) {
+ case ACTION_PRESENT:
+ return STACK_PRESENT_LO;
+ case ACTION_READY:
+ return STACK_READY_LO;
+ case ACTION_PWRON:
+ return STACK_PWRON_LO;
+ case ACTION_PWROFF:
+ return STACK_PWROFF_LO;
+ case ACTION_PWRTRANS:
+ return STACK_PWRTRANS_LO;
+ default:
+ BUG();
+ }
+ }
+#endif /* CONFIG_MALI_CORESTACK */
+
+ return (u32)core_type + (u32)action;
+}
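+
+/*
+ * Worked example of the encoding above: ACTION_READY is defined as
+ * (SHADER_READY_LO - SHADER_PRESENT_LO) and KBASE_PM_CORE_SHADER as
+ * SHADER_PRESENT_LO, so
+ *
+ *	core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY)
+ *		== SHADER_PRESENT_LO + (SHADER_READY_LO - SHADER_PRESENT_LO)
+ *		== SHADER_READY_LO
+ *
+ * i.e. the enum values are chosen so that a simple addition yields the _LO
+ * register for any (core type, action) pair.
+ */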
+
+#ifdef CONFIG_ARM64
+static void mali_cci_flush_l2(struct kbase_device *kbdev)
+{
+ const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
+ u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+ u32 raw;
+
+ /*
+ * Note that we don't take the cache flush mutex here since
+ * we expect to be the last user of the L2, all other L2 users
+ * would have dropped their references, to initiate L2 power
+ * down, L2 power down being the only valid place for this
+ * to be called from.
+ */
+
+ kbase_reg_write(kbdev,
+ GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES,
+ NULL);
+
+ raw = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+ NULL);
+
+ /* Wait for cache flush to complete before continuing, exit on
+ * gpu resets or loop expiry. */
+ while (((raw & mask) == 0) && --loops) {
+ raw = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+ NULL);
+ }
+}
+#endif
+
+/**
+ * kbase_pm_invoke - Invokes an action on a core set
+ *
+ * This function performs the action given by @action on a set of cores of a
+ * type given by @core_type. It is a static function used by
+ * kbase_pm_transition_core_type()
+ *
+ * @kbdev: The kbase device structure of the device
+ * @core_type: The type of core that the action should be performed on
+ * @cores: A bit mask of cores to perform the action on (low 32 bits)
+ * @action: The action to perform on the cores
+ */
+static void kbase_pm_invoke(struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ u64 cores,
+ enum kbasep_pm_action action)
+{
+ u32 reg;
+ u32 lo = cores & 0xFFFFFFFF;
+ u32 hi = (cores >> 32) & 0xFFFFFFFF;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ reg = core_type_to_reg(core_type, action);
+
+ KBASE_DEBUG_ASSERT(reg);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ if (cores) {
+ if (action == ACTION_PWRON)
+ kbase_trace_mali_pm_power_on(core_type, cores);
+ else if (action == ACTION_PWROFF)
+ kbase_trace_mali_pm_power_off(core_type, cores);
+ }
+#endif
+
+ if (cores) {
+ u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
+
+ if (action == ACTION_PWRON)
+ state |= cores;
+ else if (action == ACTION_PWROFF)
+ state &= ~cores;
+ KBASE_TLSTREAM_AUX_PM_STATE(core_type, state);
+ }
+
+ /* Tracing */
+ if (cores) {
+ if (action == ACTION_PWRON)
+ switch (core_type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
+ lo);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
+ NULL, 0u, lo);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
+ 0u, lo);
+ break;
+ default:
+ break;
+ }
+ else if (action == ACTION_PWROFF)
+ switch (core_type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
+ 0u, lo);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
+ NULL, 0u, lo);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
+ 0u, lo);
+ /* disable snoops before L2 is turned off */
+ kbase_pm_cache_snoop_disable(kbdev);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (lo != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);
+
+ if (hi != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
+}
+
+/**
+ * kbase_pm_get_state - Get information about a core set
+ *
+ * This function gets information (chosen by @action) about a set of cores of
+ * a type given by @core_type. It is a static function used by
+ * kbase_pm_get_active_cores(), kbase_pm_get_trans_cores() and
+ * kbase_pm_get_ready_cores().
+ *
+ * @kbdev: The kbase device structure of the device
+ * @core_type: The type of core that should be queried
+ * @action: The property of the cores to query
+ *
+ * Return: A bit mask specifying the state of the cores
+ */
+static u64 kbase_pm_get_state(struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action)
+{
+ u32 reg;
+ u32 lo, hi;
+
+ reg = core_type_to_reg(core_type, action);
+
+ KBASE_DEBUG_ASSERT(reg);
+
+ lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
+ hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
+
+ return (((u64) hi) << 32) | ((u64) lo);
+}
+
+void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev)
+{
+ kbdev->shader_inuse_bitmap = 0;
+ kbdev->shader_needed_bitmap = 0;
+ kbdev->shader_available_bitmap = 0;
+ kbdev->tiler_available_bitmap = 0;
+ kbdev->l2_users_count = 0;
+ kbdev->l2_available_bitmap = 0;
+ kbdev->tiler_needed_cnt = 0;
+ kbdev->tiler_inuse_cnt = 0;
+
+ memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
+}
+
+/**
+ * kbase_pm_get_present_cores - Get the cores that are present
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of the cores that are present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ switch (type) {
+ case KBASE_PM_CORE_L2:
+ return kbdev->gpu_props.props.raw_props.l2_present;
+ case KBASE_PM_CORE_SHADER:
+ return kbdev->gpu_props.props.raw_props.shader_present;
+ case KBASE_PM_CORE_TILER:
+ return kbdev->gpu_props.props.raw_props.tiler_present;
+#ifdef CONFIG_MALI_CORESTACK
+ case KBASE_PM_CORE_STACK:
+ return kbdev->gpu_props.props.raw_props.stack_present;
+#endif /* CONFIG_MALI_CORESTACK */
+ default:
+ break;
+ }
+ KBASE_DEBUG_ASSERT(0);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
+
+/**
+ * kbase_pm_get_active_cores - Get the cores that are "active"
+ * (busy processing work)
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are active
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
+
+/**
+ * kbase_pm_get_trans_cores - Get the cores that are transitioning between
+ * power states
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are transitioning
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
+
+/**
+ * kbase_pm_get_ready_cores - Get the cores that are powered on
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are ready (powered on)
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ u64 result;
+
+ result = kbase_pm_get_state(kbdev, type, ACTION_READY);
+
+ switch (type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
+
+/**
+ * kbase_pm_transition_core_type - Perform power transitions for a particular
+ * core type.
+ *
+ * This function will perform any available power transitions to make the actual
+ * hardware state closer to the desired state. If a core is currently
+ * transitioning then changes to the power state of that core cannot be made
+ * until the transition has finished. Cores which are not present in the
+ * hardware are ignored if they are specified in the desired_state bitmask,
+ * however the return value will always be 0 in this case.
+ *
+ * @kbdev: The kbase device
+ * @type: The core type to perform transitions for
+ * @desired_state: A bit mask of the desired state of the cores
+ * @in_use: A bit mask of the cores that are currently running
+ * jobs. These cores have to be kept powered up because
+ * there are jobs running (or about to run) on them.
+ * @available: Receives a bit mask of the cores that the job
+ * scheduler can use to submit jobs to. May be NULL if
+ * this is not needed.
+ * @powering_on: Bit mask to update with cores that are
+ * transitioning to a power-on state.
+ *
+ * Return: true if the desired state has been reached, false otherwise
+ */
+static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type,
+ u64 desired_state,
+ u64 in_use,
+ u64 * const available,
+ u64 *powering_on)
+{
+ u64 present;
+ u64 ready;
+ u64 trans;
+ u64 powerup;
+ u64 powerdown;
+ u64 powering_on_trans;
+ u64 desired_state_in_use;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Get current state */
+ present = kbase_pm_get_present_cores(kbdev, type);
+ trans = kbase_pm_get_trans_cores(kbdev, type);
+ ready = kbase_pm_get_ready_cores(kbdev, type);
+ /* mask off ready from trans in case transitions finished between the
+ * register reads */
+ trans &= ~ready;
+
+ if (trans) /* Do not progress if any cores are transitioning */
+ return false;
+
+ powering_on_trans = trans & *powering_on;
+ *powering_on = powering_on_trans;
+
+ if (available != NULL)
+ *available = (ready | powering_on_trans) & desired_state;
+
+ /* Update desired state to include the in-use cores. These have to be
+ * kept powered up because there are jobs running or about to run on
+ * these cores
+ */
+ desired_state_in_use = desired_state | in_use;
+
+ /* Update state of whether l2 caches are powered */
+ if (type == KBASE_PM_CORE_L2) {
+ if ((ready == present) && (desired_state_in_use == ready) &&
+ (trans == 0)) {
+ /* All are ready, none will be turned off, and none are
+ * transitioning */
+ kbdev->pm.backend.l2_powered = 1;
+ /*
+ * Ensure snoops are enabled after L2 is powered up,
+ * note that kbase keeps track of the snoop state, so
+ * safe to repeatedly call.
+ */
+ kbase_pm_cache_snoop_enable(kbdev);
+ if (kbdev->l2_users_count > 0) {
+ /* Notify any registered l2 cache users
+ * (optimized out when no users waiting) */
+ wake_up(&kbdev->pm.backend.l2_powered_wait);
+ }
+ } else
+ kbdev->pm.backend.l2_powered = 0;
+ }
+
+ if (desired_state == ready && (trans == 0))
+ return true;
+
+ /* Restrict the cores to those that are actually present */
+ powerup = desired_state_in_use & present;
+ powerdown = (~desired_state_in_use) & present;
+
+ /* Restrict to cores that are not already in the desired state */
+ powerup &= ~ready;
+ powerdown &= ready;
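+
+	/* Example with illustrative values: if present = 0xF, ready = 0x3 and
+	 * desired_state_in_use = 0x5, then at this point powerup = 0x4
+	 * (core 2 still has to be turned on) and powerdown = 0x2 (core 1 has
+	 * to be turned off). */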
+
+ /* Don't transition any cores that are already transitioning, except for
+ * Mali cores that support the following case:
+ *
+ * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
+ * a core that is currently transitioning to power off, then this is
+ * remembered and the shader core is automatically powered up again once
+ * the original transition completes. Once the automatic power on is
+ * complete any job scheduled on the shader core should start.
+ */
+ powerdown &= ~trans;
+
+ if (kbase_hw_has_feature(kbdev,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS))
+ if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type)
+ trans = powering_on_trans; /* for exception cases, only
+ * mask off cores in power on
+ * transitions */
+
+ powerup &= ~trans;
+
+ /* Perform transitions if any */
+ kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON);
+#if !PLATFORM_POWER_DOWN_ONLY
+ kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF);
+#endif
+
+ /* Recalculate cores transitioning on, and re-evaluate our state */
+ powering_on_trans |= powerup;
+ *powering_on = powering_on_trans;
+ if (available != NULL)
+ *available = (ready | powering_on_trans) & desired_state;
+
+ return false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type);
+
+/**
+ * get_desired_cache_status - Determine which caches should be on for a
+ * particular core state
+ *
+ * This function takes a bit mask of the caches that are present and a bit
+ * mask of the cores (or caches) that are to be powered and that are attached
+ * to those caches. It then computes which caches should be turned on to allow
+ * the requested cores to be powered up.
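+ *
+ * For example (illustrative values only): with a present mask of 0x11 (L2
+ * slices at bits 0 and 4) and cores_powered of 0x30 (cores 4 and 5), only the
+ * upper slice is needed, so the function returns 0x10, or 0x11 if
+ * tilers_powered is non-zero.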
+ *
+ * @present: The bit mask of present caches
+ * @cores_powered: A bit mask of cores (or L2 caches) that are desired to
+ * be powered
+ * @tilers_powered: The bit mask of tilers that are desired to be powered
+ *
+ * Return: A bit mask of the caches that should be turned on
+ */
+static u64 get_desired_cache_status(u64 present, u64 cores_powered,
+ u64 tilers_powered)
+{
+ u64 desired = 0;
+
+ while (present) {
+ /* Find out which is the highest set bit */
+ u64 bit = fls64(present) - 1;
+ u64 bit_mask = 1ull << bit;
+
+		/* Create a mask which has all bits from 'bit' upwards set */
+		u64 mask = ~(bit_mask - 1);
+
+ /* If there are any cores powered at this bit or above (that
+ * haven't previously been processed) then we need this core on
+ */
+ if (cores_powered & mask)
+ desired |= bit_mask;
+
+ /* Remove bits from cores_powered and present */
+ cores_powered &= ~mask;
+ present &= ~bit_mask;
+ }
+
+ /* Power up the required L2(s) for the tiler */
+ if (tilers_powered)
+ desired |= 1;
+
+ return desired;
+}
+
+KBASE_EXPORT_TEST_API(get_desired_cache_status);
+
+#ifdef CONFIG_MALI_CORESTACK
+u64 kbase_pm_core_stack_mask(u64 cores)
+{
+ u64 stack_mask = 0;
+ size_t const MAX_CORE_ID = 31;
+ size_t const NUM_CORES_PER_STACK = 4;
+ size_t i;
+
+ for (i = 0; i <= MAX_CORE_ID; ++i) {
+ if (test_bit(i, (unsigned long *)&cores)) {
+			/* Every core with an ID >= 16 is mapped to stacks 4-7
+			 * instead of 0-3 */
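+			/* e.g. a set bit for core 18 sets stack bit 6
+			 * ((18 % 4) + 4); illustrative value only */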
+ size_t const stack_num = (i > 16) ?
+ (i % NUM_CORES_PER_STACK) + 4 :
+ (i % NUM_CORES_PER_STACK);
+ set_bit(stack_num, (unsigned long *)&stack_mask);
+ }
+ }
+
+ return stack_mask;
+}
+#endif /* CONFIG_MALI_CORESTACK */
+
+bool
+MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
+{
+ bool cores_are_available = false;
+ bool in_desired_state = true;
+ u64 desired_l2_state;
+#ifdef CONFIG_MALI_CORESTACK
+ u64 desired_stack_state;
+ u64 stacks_powered;
+#endif /* CONFIG_MALI_CORESTACK */
+ u64 cores_powered;
+ u64 tilers_powered;
+ u64 tiler_available_bitmap;
+ u64 tiler_transitioning_bitmap;
+ u64 shader_available_bitmap;
+ u64 shader_ready_bitmap;
+ u64 shader_transitioning_bitmap;
+ u64 l2_available_bitmap;
+ u64 prev_l2_available_bitmap;
+ u64 l2_inuse_bitmap;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock(&kbdev->pm.backend.gpu_powered_lock);
+ if (kbdev->pm.backend.gpu_powered == false) {
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+ if (kbdev->pm.backend.desired_shader_state == 0 &&
+ kbdev->pm.backend.desired_tiler_state == 0)
+ return true;
+ return false;
+ }
+
+ /* Trace that a change-state is being requested, and that it took
+ * (effectively) no time to start it. This is useful for counting how
+ * many state changes occurred, in a way that's backwards-compatible
+ * with processing the trace data */
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
+
+	/* If any cores are already powered then we must keep the caches on */
+ shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+ cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+ cores_powered |= kbdev->pm.backend.desired_shader_state;
+
+#ifdef CONFIG_MALI_CORESTACK
+ /* Work out which core stacks want to be powered */
+ desired_stack_state = kbase_pm_core_stack_mask(cores_powered);
+ stacks_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_STACK) |
+ desired_stack_state;
+#endif /* CONFIG_MALI_CORESTACK */
+
+ /* Work out which tilers want to be powered */
+ tiler_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_TILER);
+ tilers_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER);
+ tilers_powered |= kbdev->pm.backend.desired_tiler_state;
+
+ /* If there are l2 cache users registered, keep all l2s powered even if
+ * all other cores are off. */
+ if (kbdev->l2_users_count > 0)
+ cores_powered |= kbdev->gpu_props.props.raw_props.l2_present;
+
+ desired_l2_state = get_desired_cache_status(
+ kbdev->gpu_props.props.raw_props.l2_present,
+ cores_powered, tilers_powered);
+
+ l2_inuse_bitmap = get_desired_cache_status(
+ kbdev->gpu_props.props.raw_props.l2_present,
+ cores_powered | shader_transitioning_bitmap,
+ tilers_powered | tiler_transitioning_bitmap);
+
+#ifdef CONFIG_MALI_CORESTACK
+ if (stacks_powered)
+ desired_l2_state |= 1;
+#endif /* CONFIG_MALI_CORESTACK */
+
+ /* If any l2 cache is on, then enable l2 #0, for use by job manager */
+ if (0 != desired_l2_state)
+ desired_l2_state |= 1;
+
+ prev_l2_available_bitmap = kbdev->l2_available_bitmap;
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_L2, desired_l2_state, l2_inuse_bitmap,
+ &l2_available_bitmap,
+ &kbdev->pm.backend.powering_on_l2_state);
+
+ if (kbdev->l2_available_bitmap != l2_available_bitmap)
+ KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
+
+ kbdev->l2_available_bitmap = l2_available_bitmap;
+
+
+#ifdef CONFIG_MALI_CORESTACK
+ if (in_desired_state) {
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_STACK, desired_stack_state, 0,
+ &kbdev->stack_available_bitmap,
+ &kbdev->pm.backend.powering_on_stack_state);
+ }
+#endif /* CONFIG_MALI_CORESTACK */
+
+ if (in_desired_state) {
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_TILER,
+ kbdev->pm.backend.desired_tiler_state,
+ 0, &tiler_available_bitmap,
+ &kbdev->pm.backend.powering_on_tiler_state);
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_SHADER,
+ kbdev->pm.backend.desired_shader_state,
+ kbdev->shader_inuse_bitmap,
+ &shader_available_bitmap,
+ &kbdev->pm.backend.powering_on_shader_state);
+
+ if (kbdev->shader_available_bitmap != shader_available_bitmap) {
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+ NULL, 0u,
+ (u32) shader_available_bitmap);
+ KBASE_TIMELINE_POWER_SHADER(kbdev,
+ shader_available_bitmap);
+ }
+
+ kbdev->shader_available_bitmap = shader_available_bitmap;
+
+ if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+ NULL, NULL, 0u,
+ (u32) tiler_available_bitmap);
+ KBASE_TIMELINE_POWER_TILER(kbdev,
+ tiler_available_bitmap);
+ }
+
+ kbdev->tiler_available_bitmap = tiler_available_bitmap;
+
+ } else if ((l2_available_bitmap &
+ kbdev->gpu_props.props.raw_props.tiler_present) !=
+ kbdev->gpu_props.props.raw_props.tiler_present) {
+ tiler_available_bitmap = 0;
+
+ if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
+ KBASE_TIMELINE_POWER_TILER(kbdev,
+ tiler_available_bitmap);
+
+ kbdev->tiler_available_bitmap = tiler_available_bitmap;
+ }
+
+ /* State updated for slow-path waiters */
+ kbdev->pm.backend.gpu_in_desired_state = in_desired_state;
+
+ shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+ shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+
+ /* Determine whether the cores are now available (even if the set of
+ * available cores is empty). Note that they can be available even if
+ * we've not finished transitioning to the desired state */
+ if ((kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state)
+ == kbdev->pm.backend.desired_shader_state &&
+ (kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state)
+ == kbdev->pm.backend.desired_tiler_state) {
+ cores_are_available = true;
+
+ KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u,
+ (u32)(kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state));
+ KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u,
+ (u32)(kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state));
+
+		/* Log timelining information about handling events that power
+		 * up cores, to match up either with immediate submission
+		 * (because cores were already available) or with submission
+		 * from the PM IRQ */
+ if (!in_desired_state)
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ }
+
+ if (in_desired_state) {
+ KBASE_DEBUG_ASSERT(cores_are_available);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_L2,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_L2));
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_SHADER));
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_TILER));
+#ifdef CONFIG_MALI_CORESTACK
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_STACK,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_STACK));
+#endif /* CONFIG_MALI_CORESTACK */
+#endif
+
+ KBASE_TLSTREAM_AUX_PM_STATE(
+ KBASE_PM_CORE_L2,
+ kbase_pm_get_ready_cores(
+ kbdev, KBASE_PM_CORE_L2));
+ KBASE_TLSTREAM_AUX_PM_STATE(
+ KBASE_PM_CORE_SHADER,
+ kbase_pm_get_ready_cores(
+ kbdev, KBASE_PM_CORE_SHADER));
+ KBASE_TLSTREAM_AUX_PM_STATE(
+ KBASE_PM_CORE_TILER,
+ kbase_pm_get_ready_cores(
+ kbdev,
+ KBASE_PM_CORE_TILER));
+#ifdef CONFIG_MALI_CORESTACK
+ KBASE_TLSTREAM_AUX_PM_STATE(
+ KBASE_PM_CORE_STACK,
+ kbase_pm_get_ready_cores(
+ kbdev,
+ KBASE_PM_CORE_STACK));
+#endif /* CONFIG_MALI_CORESTACK */
+
+ KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
+ kbdev->pm.backend.gpu_in_desired_state,
+ (u32)kbdev->pm.backend.desired_shader_state);
+ KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u,
+ (u32)kbdev->pm.backend.desired_tiler_state);
+
+ /* Log timelining information for synchronous waiters */
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ /* Wake slow-path waiters. Job scheduler does not use this. */
+ KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
+
+ wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
+ }
+
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+
+ /* kbase_pm_ca_update_core_status can cause one-level recursion into
+ * this function, so it must only be called once all changes to kbdev
+ * have been committed, and after the gpu_powered_lock has been
+ * dropped. */
+ if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
+ kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
+ kbdev->shader_ready_bitmap = shader_ready_bitmap;
+ kbdev->shader_transitioning_bitmap =
+ shader_transitioning_bitmap;
+
+ kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap,
+ shader_transitioning_bitmap);
+ }
+
+ /* The core availability policy is not allowed to keep core group 0
+ * turned off (unless it was changing the l2 power state) */
+ if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
+ (prev_l2_available_bitmap == desired_l2_state) &&
+ !(kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask))
+ BUG();
+
+ /* The core availability policy is allowed to keep core group 1 off,
+ * but all jobs specifically targeting CG1 must fail */
+ if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
+ !(kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask))
+ kbdev->pm.backend.cg1_disabled = true;
+ else
+ kbdev->pm.backend.cg1_disabled = false;
+
+ return cores_are_available;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock);
+
+/* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has
+ * aborted due to a fatal signal. If the time spent waiting has exceeded this
+ * threshold then there is most likely a hardware issue. */
+#define PM_TIMEOUT (5*HZ) /* 5s */
+
+void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ unsigned long timeout;
+ bool cores_are_available;
+ int ret;
+
+ /* Force the transition to be checked and reported - the cores may be
+ * 'available' (for job submission) but not fully powered up. */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+
+ /* Don't need 'cores_are_available', because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ timeout = jiffies + PM_TIMEOUT;
+
+ /* Wait for cores */
+ ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+ kbdev->pm.backend.gpu_in_desired_state);
+
+ if (ret < 0 && time_after(jiffies, timeout)) {
+ dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
+ dev_err(kbdev->dev, "Desired state :\n");
+ dev_err(kbdev->dev, "\tShader=%016llx\n",
+ kbdev->pm.backend.desired_shader_state);
+ dev_err(kbdev->dev, "\tTiler =%016llx\n",
+ kbdev->pm.backend.desired_tiler_state);
+ dev_err(kbdev->dev, "Current state :\n");
+ dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_READY_LO),
+ NULL));
+ dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_READY_LO), NULL));
+ dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_READY_LO), NULL));
+ dev_err(kbdev->dev, "Cores transitioning :\n");
+ dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ SHADER_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ SHADER_PWRTRANS_LO), NULL));
+ dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ TILER_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ TILER_PWRTRANS_LO), NULL));
+ dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ L2_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ L2_PWRTRANS_LO), NULL));
+#if KBASE_GPU_RESET_EN
+ dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+ } else {
+ /* Log timelining information that a change in state has
+ * completed */
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ }
+}
+KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync);
+
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ /*
+ * Clear all interrupts,
+ * and unmask them all.
+ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
+ NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
+ NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
+ NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
+
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ /*
+ * Mask all interrupts,
+ * and clear them all.
+ */
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
+ NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
+ NULL);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
+}
+
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_disable_interrupts_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+
+
+/*
+ * pmu layout:
+ * 0x0000: PMU TAG (RO) (0xCAFECAFE)
+ * 0x0004: PMU VERSION ID (RO) (0x00000000)
+ * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+{
+ bool reset_required = is_resume;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ if (kbdev->pm.backend.gpu_powered) {
+ /* Already turned on */
+ if (kbdev->poweroff_pending)
+ kbase_pm_enable_interrupts(kbdev);
+ kbdev->poweroff_pending = false;
+ KBASE_DEBUG_ASSERT(!is_resume);
+ return;
+ }
+
+ kbdev->poweroff_pending = false;
+
+ KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
+
+ if (is_resume && kbdev->pm.backend.callback_power_resume) {
+ kbdev->pm.backend.callback_power_resume(kbdev);
+ return;
+ } else if (kbdev->pm.backend.callback_power_on) {
+ kbdev->pm.backend.callback_power_on(kbdev);
+ /* If your platform properly keeps the GPU state you may use the
+ * return value of the callback_power_on function to
+ * conditionally reset the GPU on power up. Currently we are
+ * conservative and always reset the GPU. */
+ reset_required = true;
+ }
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+ kbdev->pm.backend.gpu_powered = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (reset_required) {
+ /* GPU state was lost, reset GPU to ensure it is in a
+ * consistent state */
+ kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
+ }
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_restore_all_as(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ /* Lastly, enable the interrupts */
+ kbase_pm_enable_interrupts(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
+
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* ASSERT that the cores should now be unavailable. No lock needed. */
+ KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u);
+
+ kbdev->poweroff_pending = true;
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* Already turned off */
+ if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+ kbdev->pm.backend.callback_power_suspend(kbdev);
+ return true;
+ }
+
+ KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
+
+ /* Disable interrupts. This also clears any outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Ensure that any IRQ handlers have finished */
+ kbase_synchronize_irqs(kbdev);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (atomic_read(&kbdev->faults_pending)) {
+		/* Page/bus faults are still being processed. The GPU cannot
+ * be powered off until they have completed */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return false;
+ }
+
+ kbase_pm_cache_snoop_disable(kbdev);
+
+ /* The GPU power may be turned off from this point */
+ kbdev->pm.backend.gpu_powered = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+ kbdev->pm.backend.callback_power_suspend(kbdev);
+ else if (kbdev->pm.backend.callback_power_off)
+ kbdev->pm.backend.callback_power_off(kbdev);
+ return true;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
+
+struct kbasep_reset_timeout_data {
+ struct hrtimer timer;
+ bool timed_out;
+ struct kbase_device *kbdev;
+};
+
+void kbase_pm_reset_done(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ kbdev->pm.backend.reset_done = true;
+ wake_up(&kbdev->pm.backend.reset_done_wait);
+}
+
+/**
+ * kbase_pm_wait_for_reset - Wait for a reset to happen
+ *
+ * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
+ *
+ * @kbdev: Kbase device
+ */
+static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ wait_event(kbdev->pm.backend.reset_done_wait,
+ (kbdev->pm.backend.reset_done));
+ kbdev->pm.backend.reset_done = false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
+
+static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
+{
+ struct kbasep_reset_timeout_data *rtdata =
+ container_of(timer, struct kbasep_reset_timeout_data, timer);
+
+ rtdata->timed_out = 1;
+
+ /* Set the wait queue to wake up kbase_pm_init_hw even though the reset
+ * hasn't completed */
+ kbase_pm_reset_done(rtdata->kbdev);
+
+ return HRTIMER_NORESTART;
+}
+
+static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+{
+ struct device_node *np = kbdev->dev->of_node;
+ u32 jm_values[4];
+ const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
+ GPU_ID_VERSION_MAJOR_SHIFT;
+
+ kbdev->hw_quirks_sc = 0;
+
+	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE (see PRLAM-8443),
+	 * and also due to MIDGLES-3539 (see PRLAM-11035). */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
+ kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
+
+ /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
+ kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
+
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ /* Enable alternative hardware counter selection if configured. */
+ if (!GPU_ID_IS_NEW_FORMAT(prod_id))
+ kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
+#endif
+
+ /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
+ kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
+
+ if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
+ if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+ kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
+ else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
+ kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
+ }
+
+ if (!kbdev->hw_quirks_sc)
+ kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_CONFIG), NULL);
+
+ kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_CONFIG), NULL);
+
+ /* Set tiler clock gate override if required */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
+ kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
+
+ /* Limit the GPU bus bandwidth if the platform needs this. */
+ kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
+
+ /* Limit read ID width for AXI */
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
+ kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
+
+ /* Limit write ID width for AXI */
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+ kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+		/* Allow memory configuration disparity to be ignored; we
+ * optimize the use of shared memory and thus we expect
+ * some disparity in the memory configuration */
+ kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
+ }
+
+ kbdev->hw_quirks_jm = 0;
+ /* Only for T86x/T88x-based products after r2p0 */
+ if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
+
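+		/* The four jm_config cells map to JM_TIMESTAMP_OVERRIDE,
+		 * JM_CLOCK_GATE_OVERRIDE, JM_JOB_THROTTLE_ENABLE and the job
+		 * throttle limit, e.g. jm_config = <0 0 1 63>;
+		 * (illustrative values only) */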
+ if (of_property_read_u32_array(np,
+ "jm_config",
+ &jm_values[0],
+ ARRAY_SIZE(jm_values))) {
+ /* Entry not in device tree, use defaults */
+ jm_values[0] = 0;
+ jm_values[1] = 0;
+ jm_values[2] = 0;
+ jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT;
+ }
+
+		/* Limit the job throttle limit to 6 bits */
+ if (jm_values[3] > JM_MAX_JOB_THROTTLE_LIMIT) {
+ dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
+ jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT;
+ }
+
+ /* Aggregate to one integer. */
+ kbdev->hw_quirks_jm |= (jm_values[0] ?
+ JM_TIMESTAMP_OVERRIDE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[1] ?
+ JM_CLOCK_GATE_OVERRIDE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[2] ?
+ JM_JOB_THROTTLE_ENABLE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[3] <<
+ JM_JOB_THROTTLE_LIMIT_SHIFT);
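+		/* e.g. the illustrative values <0 0 1 63> above would yield
+		 * JM_JOB_THROTTLE_ENABLE |
+		 * (63 << JM_JOB_THROTTLE_LIMIT_SHIFT) */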
+
+ } else if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
+ (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+ GPU_ID2_PRODUCT_TMIX)) {
+ /* Only for tMIx */
+ u32 coherency_features;
+
+ coherency_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
+
+ /* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+ * documented for tMIx so force correct value here.
+ */
+ if (coherency_features ==
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
+ kbdev->hw_quirks_jm |=
+ (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
+ JM_FORCE_COHERENCY_FEATURES_SHIFT;
+ }
+ }
+
+ if (!kbdev->hw_quirks_jm)
+ kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JM_CONFIG), NULL);
+
+#ifdef CONFIG_MALI_CORESTACK
+#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
+ kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
+#endif /* CONFIG_MALI_CORESTACK */
+}
+
+static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
+{
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
+ kbdev->hw_quirks_sc, NULL);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
+ kbdev->hw_quirks_tiler, NULL);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
+ kbdev->hw_quirks_mmu, NULL);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
+ kbdev->hw_quirks_jm, NULL);
+
+}
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+{
+ if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
+ !kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+ if (kbdev->snoop_enable_smc != 0)
+ kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
+#endif /* CONFIG_ARM64 */
+ dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
+ kbdev->cci_snoop_enabled = true;
+ }
+}
+
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+{
+ if (kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+ if (kbdev->snoop_disable_smc != 0) {
+ mali_cci_flush_l2(kbdev);
+ kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
+ }
+#endif /* CONFIG_ARM64 */
+ dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
+ kbdev->cci_snoop_enabled = false;
+ }
+}
+
+static int kbase_pm_do_reset(struct kbase_device *kbdev)
+{
+ struct kbasep_reset_timeout_data rtdata;
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+
+ KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_SOFT_RESET, NULL);
+
+ /* Unmask the reset complete interrupt only */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED,
+ NULL);
+
+ /* Initialize a structure for tracking the status of the reset */
+ rtdata.kbdev = kbdev;
+ rtdata.timed_out = 0;
+
+ /* Create a timer to use as a timeout on the reset */
+ hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rtdata.timer.function = kbasep_reset_timeout;
+
+ hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for the RESET_COMPLETED interrupt to be raised */
+ kbase_pm_wait_for_reset(kbdev);
+
+ if (rtdata.timed_out == 0) {
+ /* GPU has been reset */
+ hrtimer_cancel(&rtdata.timer);
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return 0;
+ }
+
+ /* No interrupt has been received - check if the RAWSTAT register says
+ * the reset has completed */
+ if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ RESET_COMPLETED) {
+ /* The interrupt is set in the RAWSTAT; this suggests that the
+ * interrupts are not getting to the CPU */
+ dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
+ /* If interrupts aren't working we can't continue. */
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return -EINVAL;
+ }
+
+ /* The GPU doesn't seem to be responding to the reset so try a hard
+ * reset */
+ dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
+ RESET_TIMEOUT);
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_HARD_RESET, NULL);
+
+ /* Restart the timer to wait for the hard reset to complete */
+ rtdata.timed_out = 0;
+
+ hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for the RESET_COMPLETED interrupt to be raised */
+ kbase_pm_wait_for_reset(kbdev);
+
+ if (rtdata.timed_out == 0) {
+ /* GPU has been reset */
+ hrtimer_cancel(&rtdata.timer);
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return 0;
+ }
+
+ destroy_hrtimer_on_stack(&rtdata.timer);
+
+ dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
+ RESET_TIMEOUT);
+
+ return -EINVAL;
+}
+
+static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
+{
+ struct kbase_device *kbdev = pdev->data;
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_SET_PROTECTED_MODE, NULL);
+ return 0;
+}
+
+static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+{
+ struct kbase_device *kbdev = pdev->data;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ return kbase_pm_do_reset(kbdev);
+}
+
+struct protected_mode_ops kbase_native_protected_ops = {
+ .protected_mode_enable = kbasep_protected_mode_enable,
+ .protected_mode_disable = kbasep_protected_mode_disable
+};
+
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+{
+ unsigned long irq_flags;
+ int err;
+ bool resume_vinstr = false;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* Ensure the clock is on before attempting to access the hardware */
+ if (!kbdev->pm.backend.gpu_powered) {
+ if (kbdev->pm.backend.callback_power_on)
+ kbdev->pm.backend.callback_power_on(kbdev);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
+ kbdev->pm.backend.gpu_powered = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
+ }
+
+	/* Ensure interrupts are off to begin with; this also clears any
+ * outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Ensure cache snoops are disabled before reset. */
+ kbase_pm_cache_snoop_disable(kbdev);
+ /* Prepare for the soft-reset */
+ kbdev->pm.backend.reset_done = false;
+
+ /* The cores should be made unavailable due to the reset */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ if (kbdev->shader_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+ NULL, 0u, (u32)0u);
+ if (kbdev->tiler_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+ NULL, NULL, 0u, (u32)0u);
+ kbdev->shader_available_bitmap = 0u;
+ kbdev->tiler_available_bitmap = 0u;
+ kbdev->l2_available_bitmap = 0u;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ /* Soft reset the GPU */
+ if (kbdev->protected_mode_support)
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
+ else
+ err = kbase_pm_do_reset(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ if (kbdev->protected_mode)
+ resume_vinstr = true;
+ kbdev->protected_mode = false;
+ kbase_ipa_model_use_configured_locked(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ if (err)
+ goto exit;
+
+ if (flags & PM_HW_ISSUES_DETECT)
+ kbase_pm_hw_issues_detect(kbdev);
+
+ kbase_pm_hw_issues_apply(kbdev);
+ kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
+
+ /* Sanity check protected mode was left after reset */
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+ u32 gpu_status = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_STATUS), NULL);
+
+ WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
+ }
+
+	/* If the cycle counter was in use, re-enable it; enable_irqs will only
+	 * be false when called from kbase_pm_powerup */
+ if (kbdev->pm.backend.gpu_cycle_counter_requests &&
+ (flags & PM_ENABLE_IRQS)) {
+ /* enable interrupts as the L2 may have to be powered on */
+ kbase_pm_enable_interrupts(kbdev);
+ kbase_pm_request_l2_caches(kbdev);
+
+ /* Re-enable the counters if we need to */
+ spin_lock_irqsave(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+ if (kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_START, NULL);
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ kbase_pm_release_l2_caches(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ kbase_pm_disable_interrupts(kbdev);
+ }
+
+ if (flags & PM_ENABLE_IRQS)
+ kbase_pm_enable_interrupts(kbdev);
+
+exit:
+	/* If the GPU is leaving protected mode, resume vinstr operation. */
+ if (kbdev->vinstr_ctx && resume_vinstr)
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ return err;
+}
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
+ *
+ * Increase the count of cycle counter users and turn the cycle counters on if
+ * they were previously off
+ *
+ * This function is designed to be called by
+ * kbase_pm_request_gpu_cycle_counter() or
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on() only
+ *
+ * When this function is called the l2 cache must be on and the l2 cache users
+ * count must have been incremented by a call to either
+ * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_is_on()
+ *
+ * @kbdev: The kbase device structure of the device
+ */
+static void
+kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ ++kbdev->pm.backend.gpu_cycle_counter_requests;
+
+ if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_START, NULL);
+
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+}
+
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+ INT_MAX);
+
+ kbase_pm_request_l2_caches(kbdev);
+
+ kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
+
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+ INT_MAX);
+
+ kbase_pm_request_l2_caches_l2_is_on(kbdev);
+
+ kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
+
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
+
+ --kbdev->pm.backend.gpu_cycle_counter_requests;
+
+ if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
+
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ kbase_pm_release_l2_caches(kbdev);
+}
+
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_internal.h
new file mode 100644
index 000000000000..6804f45ac27b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_internal.h
@@ -0,0 +1,548 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Power management API definitions used internally by GPU backend
+ */
+
+#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
+#define _KBASE_BACKEND_PM_INTERNAL_H_
+
+#include <mali_kbase_hwaccess_pm.h>
+
+#include "mali_kbase_pm_ca.h"
+#include "mali_kbase_pm_policy.h"
+
+
+/**
+ * kbase_pm_dev_idle - The GPU is idle.
+ *
+ * The OS may choose to turn off idle devices
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_dev_activate - The GPU is active.
+ *
+ * The OS should avoid opportunistically turning off the GPU while it is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_activate(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_get_present_cores - Get details of the cores that are present in
+ * the device.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) present in the GPU device; the number of
+ * cores can be derived from this mask.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of cores present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_active_cores - Get details of the cores that are currently
+ * active in the device.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are actively processing work (i.e.
+ * turned on *and* busy).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of active cores
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_trans_cores - Get details of the cores that are currently
+ * transitioning between power states.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are currently transitioning between
+ * power states.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of transitioning cores
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_ready_cores - Get details of the cores that are currently
+ * powered and ready for jobs.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are powered and ready for jobs (they may
+ * or may not be currently executing jobs).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of ready cores
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_clock_on - Turn the clock for the device on, and enable device
+ * interrupts.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU on.
+ * It should be modified during integration to perform the necessary actions to
+ * ensure that the GPU is fully powered and clocked.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_resume: true if clock on due to resume after suspend, false otherwise
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
+ * device off.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU
+ * off. It should be modified during integration to perform the necessary
+ * actions to turn the clock off (if this is possible in the integration).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_suspend: true if clock off due to suspend, false otherwise
+ *
+ * Return: true if clock was turned off, or
+ *         false if clock cannot be turned off due to pending page/bus fault
+ * workers. Caller must flush MMU workqueues and retry
+ */
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
+
+/**
+ * kbase_pm_enable_interrupts - Enable interrupts on the device.
+ *
+ * Interrupts are also enabled after a call to kbase_pm_clock_on().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts - Disable interrupts on the device.
+ *
+ * This prevents delivery of Power Management interrupts to the CPU so that
+ * kbase_pm_check_transitions_nolock() will not be called from the IRQ handler
+ * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
+ *
+ * Interrupts are also disabled after a call to kbase_pm_clock_off().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
+ * that does not take the hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_init_hw - Initialize the hardware.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags specifying the type of PM init
+ *
+ * This function checks the GPU ID register to ensure that the GPU is supported
+ * by the driver and performs a reset on the device so that it is in a known
+ * state before the device is used.
+ *
+ * Return: 0 if the device is supported and successfully reset.
+ */
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * kbase_pm_reset_done - The GPU has been reset successfully.
+ *
+ * This function must be called by the GPU interrupt handler when the
+ * RESET_COMPLETED bit is set. It signals to the power management initialization
+ * code that the GPU has been successfully reset.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_reset_done(struct kbase_device *kbdev);
+
+
+/**
+ * kbase_pm_check_transitions_nolock - Check if there are any power transitions
+ * to make, and if so start them.
+ *
+ * This function will check the desired_xx_state members of
+ * struct kbase_pm_device_data and the actual status of the hardware to see if
+ * any power transitions can be made at this time to make the hardware state
+ * closer to the state desired by the power policy.
+ *
+ * The return value can be used to check whether all the desired cores are
+ * available, and so whether it's worth submitting a job (e.g. from a Power
+ * Management IRQ).
+ *
+ * Note that this still returns true when desired_xx_state has no
+ * cores. That is, when no cores are desired, none of them are unavailable. In
+ * this case, the caller may still need to try submitting jobs. This is because
+ * the Core Availability Policy might have taken us to an intermediate state
+ * where no cores are powered, before powering on more cores (e.g. for core
+ * rotation)
+ *
+ * The caller must hold kbase_device.hwaccess_lock
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: true when all desired cores are available. That is,
+ *         it is worthwhile for the caller to submit a job;
+ *         false otherwise
+ */
+bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_check_transitions_sync - Synchronous and locking variant of
+ * kbase_pm_check_transitions_nolock()
+ *
+ * On returning, the desired state at the time of the call will have been met.
+ *
+ * There is nothing to stop the core being switched off by calls to
+ * kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the
+ * caller must have already made a call to
+ * kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously.
+ *
+ * The usual use-case for this is to ensure cores are 'READY' after performing
+ * a GPU Reset.
+ *
+ * Unlike kbase_pm_check_transitions_nolock(), the caller must not hold
+ * kbase_device.hwaccess_lock, because this function will take that
+ * lock itself.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_check_transitions_sync(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
+ * where the caller must hold
+ * kbase_device.pm.power_change_lock
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state - Update the desired state of shader cores from
+ * the Power Policy, and begin any power
+ * transitions.
+ *
+ * This function will update the desired_xx_state members of
+ * struct kbase_pm_device_data by calling into the current Power Policy. It will
+ * then begin power transitions to make the hardware achieve the desired shader
+ * core state.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off
+ * the GPU and/or shader cores.
+ *
+ * This should be called by any functions which directly power off the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_init_core_use_bitmaps - Initialise data tracking the required
+ * and used cores.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
+ *
+ * This must be called before other metric gathering APIs are called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbasep_pm_metrics_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
+ *
+ * This must be called when metric gathering is no longer required. It is an
+ * error to call any metrics gathering function (other than
+ * kbasep_pm_metrics_init()) after calling this function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_metrics_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
+ * update the vsync metric.
+ *
+ * This function should be called by the frame buffer driver to update whether
+ * the system is hitting the vsync target or not. buffer_updated should be true
+ * if the vsync corresponded with a new frame being displayed, otherwise it
+ * should be false. This function does not need to be called every vsync, but
+ * only when the value of @buffer_updated differs from a previous call.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ * @buffer_updated: True if the buffer has been updated on this VSync,
+ * false otherwise
+ */
+void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
+
+/**
+ * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
+ * the clock speed of the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function should be called regularly by the DVFS system to check whether
+ * the clock speed of the GPU needs updating.
+ */
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
+ * needed
+ *
+ * If the caller is the first caller then the GPU cycle counters will be enabled
+ * along with the l2 cache
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called).
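+ *
+ * Each call should be balanced by a later call to
+ * kbase_pm_release_gpu_cycle_counter() (or its _nolock variant), which
+ * decrements the request count again.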
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
+ * needed (l2 cache already on)
+ *
+ * This is a version of the above function
+ * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
+ * l2 cache is known to be on and assured to stay on until the subsequent call
+ * to kbase_pm_release_gpu_cycle_counter(), such as when a job is submitted.
+ * It does not sleep and can be called from atomic functions.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called) and the l2 cache must be
+ * powered on.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
+ * longer in use
+ *
+ * If the caller is the last caller then the GPU cycle counters will be
+ * disabled. A request must have been made before a call to this.
+ *
+ * Caller must not hold the hwaccess_lock, as it will be taken in this function.
+ * If the caller is already holding this lock then
+ * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
+ * that does not take hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
+ * complete
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_enable - Enable access to GPU registers
+ *
+ * Enables access to the GPU registers before power management has powered up
+ * the GPU with kbase_pm_powerup().
+ *
+ * Access to registers should be done using kbase_os_reg_read()/write() at this
+ * stage, not kbase_reg_read()/write().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration being called to turn on power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_disable - Disable early register access
+ *
+ * Disables access to the GPU registers enabled earlier by a call to
+ * kbase_pm_register_access_enable().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration being called to turn off power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_disable(struct kbase_device *kbdev);
+
+/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
+ * function */
+
+/**
+ * kbase_pm_metrics_is_active - Check if the power management metrics
+ * collection is active.
+ *
+ * Note that this returns whether the power management metrics collection was
+ * active at the time of calling; it is possible that after the call the
+ * metrics collection enable may have changed state.
+ *
+ * The caller must handle the consequence that the state may have changed.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * Return: true if metrics collection was active, false otherwise.
+ */
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_resume: true if power on due to resume after suspend,
+ * false otherwise
+ */
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
+ * requested.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_suspend: true if power off due to suspend,
+ * false otherwise
+ */
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+ unsigned long *total, unsigned long *busy);
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
+#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/**
+ * kbase_platform_dvfs_event - Report utilisation to DVFS code
+ *
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ * @utilisation: The current calculated utilisation by the metrics system.
+ * @util_gl_share: The current calculated gl share of utilisation.
+ * @util_cl_share: The current calculated cl share of utilisation per core
+ * group.
+ * Return: 0 on failure, non-zero on success.
+ */
+
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+ u32 util_gl_share, u32 util_cl_share[2]);
+#endif
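+
+/*
+ * Minimal sketch of the platform hook declared above (illustrative only, not
+ * part of this file): a platform implementation would typically translate
+ * 'utilisation' into an operating point for its own DVFS governor. The helper
+ * named below is hypothetical; a non-zero return reports success.
+ *
+ *	int kbase_platform_dvfs_event(struct kbase_device *kbdev,
+ *			u32 utilisation, u32 util_gl_share,
+ *			u32 util_cl_share[2])
+ *	{
+ *		my_platform_set_gpu_opp(kbdev, utilisation);
+ *		return 1;
+ *	}
+ */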
+
+void kbase_pm_power_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_update - Inform the metrics system that an atom is either
+ * about to be run or has just completed.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @now: Pointer to the timestamp of the change, or NULL to use current time
+ *
+ * Caller must hold hwaccess_lock
+ */
+void kbase_pm_metrics_update(struct kbase_device *kbdev,
+ ktime_t *now);
+
+/**
+ * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
+ * @kbdev: Device pointer
+ *
+ * If the GPU does not have coherency this is a no-op.
+ *
+ * This function should be called after L2 power up.
+ */
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
+ * @kbdev: Device pointer
+ *
+ * If the GPU does not have coherency this is a no-op.
+ *
+ * This function should be called before L2 power off.
+ */
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
+
+#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_metrics.c
new file mode 100644
index 000000000000..024248ca7123
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_metrics.c
@@ -0,0 +1,401 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Metrics for power management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/* When VSync is being hit aim for utilisation between 70-90% */
+#define KBASE_PM_VSYNC_MIN_UTILISATION 70
+#define KBASE_PM_VSYNC_MAX_UTILISATION 90
+/* Otherwise aim for 10-40% */
+#define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10
+#define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40
+
+/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
+ * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
+ * under 11s. Exceeding this will cause overflow */
+#define KBASE_PM_TIME_SHIFT 8
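+
+/* The /100 in the bound above comes from the utilisation calculation, which
+ * multiplies the u32 busy/idle counts by 100 in 32-bit arithmetic; without
+ * that factor the counters themselves could span 2^(32+8) ns (~1100 s). */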
+
+/* Maximum time between sampling of utilization data, without resetting the
+ * counters, in units of (1 << KBASE_PM_TIME_SHIFT) ns (about 25ms with the
+ * value below). */
+#define MALI_UTILIZATION_MAX_PERIOD 100000
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ struct kbasep_pm_metrics_data *metrics;
+
+ KBASE_DEBUG_ASSERT(timer != NULL);
+
+ metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
+ kbase_pm_get_dvfs_action(metrics->kbdev);
+
+ spin_lock_irqsave(&metrics->lock, flags);
+
+ if (metrics->timer_active)
+ hrtimer_start(timer,
+ HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+
+ spin_unlock_irqrestore(&metrics->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ kbdev->pm.backend.metrics.kbdev = kbdev;
+
+ kbdev->pm.backend.metrics.time_period_start = ktime_get();
+ kbdev->pm.backend.metrics.time_busy = 0;
+ kbdev->pm.backend.metrics.time_idle = 0;
+ kbdev->pm.backend.metrics.prev_busy = 0;
+ kbdev->pm.backend.metrics.prev_idle = 0;
+ kbdev->pm.backend.metrics.gpu_active = false;
+ kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.busy_cl[0] = 0;
+ kbdev->pm.backend.metrics.busy_cl[1] = 0;
+ kbdev->pm.backend.metrics.busy_gl = 0;
+
+ spin_lock_init(&kbdev->pm.backend.metrics.lock);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ kbdev->pm.backend.metrics.timer_active = true;
+ hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ kbdev->pm.backend.metrics.timer.function = dvfs_callback;
+
+ hrtimer_start(&kbdev->pm.backend.metrics.timer,
+ HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
+
+void kbasep_pm_metrics_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+ hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+ ktime_t now)
+{
+ ktime_t diff;
+
+ lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+ diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
+ if (ktime_to_ns(diff) < 0)
+ return;
+
+ if (kbdev->pm.backend.metrics.gpu_active) {
+ u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
+
+ kbdev->pm.backend.metrics.time_busy += ns_time;
+ if (kbdev->pm.backend.metrics.active_cl_ctx[0])
+ kbdev->pm.backend.metrics.busy_cl[0] += ns_time;
+ if (kbdev->pm.backend.metrics.active_cl_ctx[1])
+ kbdev->pm.backend.metrics.busy_cl[1] += ns_time;
+ if (kbdev->pm.backend.metrics.active_gl_ctx[0])
+ kbdev->pm.backend.metrics.busy_gl += ns_time;
+ if (kbdev->pm.backend.metrics.active_gl_ctx[1])
+ kbdev->pm.backend.metrics.busy_gl += ns_time;
+ } else {
+ kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff)
+ >> KBASE_PM_TIME_SHIFT);
+ }
+
+ kbdev->pm.backend.metrics.time_period_start = now;
+}
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function.
+ */
+static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev,
+ ktime_t now)
+{
+ /* Store previous value */
+ kbdev->pm.backend.metrics.prev_idle =
+ kbdev->pm.backend.metrics.time_idle;
+ kbdev->pm.backend.metrics.prev_busy =
+ kbdev->pm.backend.metrics.time_busy;
+
+ /* Reset current values */
+ kbdev->pm.backend.metrics.time_period_start = now;
+ kbdev->pm.backend.metrics.time_idle = 0;
+ kbdev->pm.backend.metrics.time_busy = 0;
+ kbdev->pm.backend.metrics.busy_cl[0] = 0;
+ kbdev->pm.backend.metrics.busy_cl[1] = 0;
+ kbdev->pm.backend.metrics.busy_gl = 0;
+}
+
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+ unsigned long *total_out, unsigned long *busy_out)
+{
+ ktime_t now = ktime_get();
+ unsigned long flags, busy, total;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
+
+ busy = kbdev->pm.backend.metrics.time_busy;
+ total = busy + kbdev->pm.backend.metrics.time_idle;
+
+	/* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (about 25ms
+	 * with the default value) */
+ if (total >= MALI_UTILIZATION_MAX_PERIOD) {
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
+ } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
+ total += kbdev->pm.backend.metrics.prev_idle +
+ kbdev->pm.backend.metrics.prev_busy;
+ busy += kbdev->pm.backend.metrics.prev_busy;
+ }
+
+ *total_out = total;
+ *busy_out = busy;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+#endif
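+
+/*
+ * Illustrative caller sketch (not part of this file): the devfreq backend is
+ * expected to turn the pair returned above into a load figure; 'load_percent'
+ * below is just a local example variable.
+ *
+ *	unsigned long total, busy;
+ *
+ *	kbase_pm_get_dvfs_utilisation(kbdev, &total, &busy);
+ *	if (total)
+ *		load_percent = (100 * busy) / total;
+ */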
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev,
+ int *util_gl_share,
+ int util_cl_share[2],
+ ktime_t now)
+{
+ int utilisation;
+ int busy;
+
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
+
+ if (kbdev->pm.backend.metrics.time_idle +
+ kbdev->pm.backend.metrics.time_busy == 0) {
+ /* No data - so we return NOP */
+ utilisation = -1;
+ if (util_gl_share)
+ *util_gl_share = -1;
+ if (util_cl_share) {
+ util_cl_share[0] = -1;
+ util_cl_share[1] = -1;
+ }
+ goto out;
+ }
+
+ utilisation = (100 * kbdev->pm.backend.metrics.time_busy) /
+ (kbdev->pm.backend.metrics.time_idle +
+ kbdev->pm.backend.metrics.time_busy);
+
+ busy = kbdev->pm.backend.metrics.busy_gl +
+ kbdev->pm.backend.metrics.busy_cl[0] +
+ kbdev->pm.backend.metrics.busy_cl[1];
+
+ if (busy != 0) {
+ if (util_gl_share)
+ *util_gl_share =
+ (100 * kbdev->pm.backend.metrics.busy_gl) /
+ busy;
+ if (util_cl_share) {
+ util_cl_share[0] =
+ (100 * kbdev->pm.backend.metrics.busy_cl[0]) /
+ busy;
+ util_cl_share[1] =
+ (100 * kbdev->pm.backend.metrics.busy_cl[1]) /
+ busy;
+ }
+ } else {
+ if (util_gl_share)
+ *util_gl_share = -1;
+ if (util_cl_share) {
+ util_cl_share[0] = -1;
+ util_cl_share[1] = -1;
+ }
+ }
+
+out:
+ return utilisation;
+}
+
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ int utilisation, util_gl_share;
+ int util_cl_share[2];
+ ktime_t now;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+ now = ktime_get();
+
+ utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share,
+ util_cl_share, now);
+
+ if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 ||
+ util_cl_share[1] < 0) {
+ utilisation = 0;
+ util_gl_share = 0;
+ util_cl_share[0] = 0;
+ util_cl_share[1] = 0;
+ goto out;
+ }
+
+out:
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
+ util_cl_share);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
+{
+ bool isactive;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ isactive = kbdev->pm.backend.metrics.timer_active;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+ return isactive;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+/**
+ * kbase_pm_metrics_active_calc - Update PM active counts based on currently
+ * running atoms
+ * @kbdev: Device pointer
+ *
+ * The caller must hold kbdev->pm.backend.metrics.lock
+ */
+static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+ kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.gpu_active = false;
+
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+
+ /* Head atom may have just completed, so if it isn't running
+ * then try the next atom */
+ if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
+ katom = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (katom && katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ int device_nr = (katom->core_req &
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
+ ? katom->device_nr : 0;
+ if (!WARN_ON(device_nr >= 2))
+ kbdev->pm.backend.metrics.
+ active_cl_ctx[device_nr] = 1;
+ } else {
+ /* Slot 2 should not be running non-compute
+ * atoms */
+ if (!WARN_ON(js >= 2))
+ kbdev->pm.backend.metrics.
+ active_gl_ctx[js] = 1;
+ }
+ kbdev->pm.backend.metrics.gpu_active = true;
+ }
+ }
+}
+
+/* called when job is submitted to or removed from a GPU slot */
+void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
+{
+ unsigned long flags;
+ ktime_t now;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+ if (!timestamp) {
+ now = ktime_get();
+ timestamp = &now;
+ }
+
+ /* Track how long CL and/or GL jobs have been busy for */
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);
+
+ kbase_pm_metrics_active_calc(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.c
new file mode 100644
index 000000000000..075f020c66e6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.c
@@ -0,0 +1,973 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Power policy API implementations
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_policy *const policy_list[] = {
+#ifdef CONFIG_MALI_NO_MALI
+ &kbase_pm_always_on_policy_ops,
+ &kbase_pm_demand_policy_ops,
+ &kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+ &kbase_pm_demand_always_powered_policy_ops,
+ &kbase_pm_fast_start_policy_ops,
+#endif
+#else /* CONFIG_MALI_NO_MALI */
+#if !PLATFORM_POWER_DOWN_ONLY
+ &kbase_pm_demand_policy_ops,
+#endif /* !PLATFORM_POWER_DOWN_ONLY */
+ &kbase_pm_coarse_demand_policy_ops,
+ &kbase_pm_always_on_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+#if !PLATFORM_POWER_DOWN_ONLY
+ &kbase_pm_demand_always_powered_policy_ops,
+ &kbase_pm_fast_start_policy_ops,
+#endif /* !PLATFORM_POWER_DOWN_ONLY */
+#endif
+#endif /* CONFIG_MALI_NO_MALI */
+};
+
+/* The number of policies available in the system.
+ * This is derived from the number of entries in policy_list.
+ */
+#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
+
+
+/* Function IDs for looking up Timeline Trace codes in
+ * kbase_pm_change_state_trace_code */
+enum kbase_pm_func_id {
+ KBASE_PM_FUNC_ID_REQUEST_CORES_START,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_END,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_START,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_END,
+ /* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither
+ * expect to hit it nor tend to hit it very much anyway. We can detect
+ * whether we need more instrumentation by a difference between
+ * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. */
+
+ /* Must be the last */
+ KBASE_PM_FUNC_ID_COUNT
+};
+
+
+/* State changes during request/unrequest/release-ing cores */
+enum {
+ KBASE_PM_CHANGE_STATE_SHADER = (1u << 0),
+ KBASE_PM_CHANGE_STATE_TILER = (1u << 1),
+
+ /* These two must be last */
+ KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER |
+ KBASE_PM_CHANGE_STATE_SHADER),
+ KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1
+};
+typedef u32 kbase_pm_change_state;
+
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+/* Timeline Trace code lookups for each function */
+static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT]
+ [KBASE_PM_CHANGE_STATE_COUNT] = {
+ /* kbase_pm_request_cores */
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START,
+
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END,
+
+ /* kbase_pm_release_cores */
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START,
+
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END
+};
+
+static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+ enum kbase_pm_func_id func_id,
+ kbase_pm_change_state state)
+{
+ int trace_code;
+
+ KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
+ KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) ==
+ state);
+
+ trace_code = kbase_pm_change_state_trace_code[func_id][state];
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
+}
+
+#else /* CONFIG_MALI_TRACE_TIMELINE */
+static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+ enum kbase_pm_func_id func_id, kbase_pm_change_state state)
+{
+}
+
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+/**
+ * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any
+ * requested shader cores
+ * @kbdev: Device pointer
+ */
+static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
+{
+ u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
+ u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->pm.backend.desired_shader_state &=
+ ~kbdev->pm.backend.shader_poweroff_pending;
+ kbdev->pm.backend.desired_tiler_state &=
+ ~kbdev->pm.backend.tiler_poweroff_pending;
+
+ kbdev->pm.backend.shader_poweroff_pending = 0;
+ kbdev->pm.backend.tiler_poweroff_pending = 0;
+
+ if (prev_shader_state != kbdev->pm.backend.desired_shader_state ||
+ prev_tiler_state !=
+ kbdev->pm.backend.desired_tiler_state ||
+ kbdev->pm.backend.ca_in_transition) {
+ bool cores_are_available;
+
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
+
+ /* Don't need 'cores_are_available',
+ * because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+ }
+}
+
+static enum hrtimer_restart
+kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
+{
+ struct kbase_device *kbdev;
+ unsigned long flags;
+
+ kbdev = container_of(timer, struct kbase_device,
+ pm.backend.gpu_poweroff_timer);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* It is safe for this call to do nothing if the work item is already
+	 * queued. The worker function will read the most up-to-date state of
+ * kbdev->pm.backend.gpu_poweroff_pending under lock.
+ *
+ * If a state change occurs while the worker function is processing,
+ * this call will succeed as a work item can be requeued once it has
+ * started processing.
+ */
+ if (kbdev->pm.backend.gpu_poweroff_pending)
+ queue_work(kbdev->pm.backend.gpu_poweroff_wq,
+ &kbdev->pm.backend.gpu_poweroff_work);
+
+ if (kbdev->pm.backend.shader_poweroff_pending ||
+ kbdev->pm.backend.tiler_poweroff_pending) {
+ kbdev->pm.backend.shader_poweroff_pending_time--;
+
+ KBASE_DEBUG_ASSERT(
+ kbdev->pm.backend.shader_poweroff_pending_time
+ >= 0);
+
+ if (!kbdev->pm.backend.shader_poweroff_pending_time)
+ kbasep_pm_do_poweroff_cores(kbdev);
+ }
+
+ if (kbdev->pm.backend.poweroff_timer_needed) {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
+
+ return HRTIMER_RESTART;
+ }
+
+ kbdev->pm.backend.poweroff_timer_running = false;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ bool do_poweroff = false;
+
+ kbdev = container_of(data, struct kbase_device,
+ pm.backend.gpu_poweroff_work);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ if (kbdev->pm.backend.gpu_poweroff_pending == 0) {
+ mutex_unlock(&kbdev->pm.lock);
+ return;
+ }
+
+ kbdev->pm.backend.gpu_poweroff_pending--;
+
+ if (kbdev->pm.backend.gpu_poweroff_pending > 0) {
+ mutex_unlock(&kbdev->pm.lock);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Only power off the GPU if a request is still pending */
+ if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
+ do_poweroff = true;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (do_poweroff) {
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
+ kbdev->pm.backend.poweroff_timer_running = false;
+
+ /* Power off the GPU */
+ kbase_pm_do_poweroff(kbdev, false);
+ }
+
+ mutex_unlock(&kbdev->pm.lock);
+}
+
+int kbase_pm_policy_init(struct kbase_device *kbdev)
+{
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("kbase_pm_do_poweroff",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!wq)
+ return -ENOMEM;
+
+ kbdev->pm.backend.gpu_poweroff_wq = wq;
+ INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work,
+ kbasep_pm_do_gpu_poweroff_wq);
+ hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kbdev->pm.backend.gpu_poweroff_timer.function =
+ kbasep_pm_do_gpu_poweroff_callback;
+ kbdev->pm.backend.pm_current_policy = policy_list[0];
+ kbdev->pm.backend.pm_current_policy->init(kbdev);
+ kbdev->pm.gpu_poweroff_time =
+ HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
+ kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
+ kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU;
+
+ return 0;
+}
+
+void kbase_pm_policy_term(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.pm_current_policy->term(kbdev);
+ destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq);
+}
+
+void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.poweroff_timer_running = false;
+
+ /* If wq is already running but is held off by pm.lock, make sure it has
+ * no effect */
+ kbdev->pm.backend.gpu_poweroff_pending = 0;
+
+ kbdev->pm.backend.shader_poweroff_pending = 0;
+ kbdev->pm.backend.tiler_poweroff_pending = 0;
+ kbdev->pm.backend.shader_poweroff_pending_time = 0;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_update_active(struct kbase_device *kbdev)
+{
+ struct kbase_pm_device_data *pm = &kbdev->pm;
+ struct kbase_pm_backend_data *backend = &pm->backend;
+ unsigned long flags;
+ bool active;
+
+ lockdep_assert_held(&pm->lock);
+
+ /* pm_current_policy will never be NULL while pm.lock is held */
+ KBASE_DEBUG_ASSERT(backend->pm_current_policy);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ active = backend->pm_current_policy->get_core_active(kbdev);
+
+ if (active) {
+ if (backend->gpu_poweroff_pending) {
+ /* Cancel any pending power off request */
+ backend->gpu_poweroff_pending = 0;
+
+ /* If a request was pending then the GPU was still
+ * powered, so no need to continue */
+ if (!kbdev->poweroff_pending) {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ return;
+ }
+ }
+
+ if (!backend->poweroff_timer_running && !backend->gpu_powered &&
+ (pm->poweroff_gpu_ticks ||
+ pm->poweroff_shader_ticks)) {
+ backend->poweroff_timer_needed = true;
+ backend->poweroff_timer_running = true;
+ hrtimer_start(&backend->gpu_poweroff_timer,
+ pm->gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
+
+ /* Power on the GPU and any cores requested by the policy */
+ if (pm->backend.poweroff_wait_in_progress) {
+ pm->backend.poweron_required = true;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ kbase_pm_do_poweron(kbdev, false);
+ }
+ } else {
+ /* It is an error for the power policy to power off the GPU
+ * when there are contexts active */
+ KBASE_DEBUG_ASSERT(pm->active_count == 0);
+
+ if (backend->shader_poweroff_pending ||
+ backend->tiler_poweroff_pending) {
+ backend->shader_poweroff_pending = 0;
+ backend->tiler_poweroff_pending = 0;
+ backend->shader_poweroff_pending_time = 0;
+ }
+
+ /* Request power off */
+ if (pm->backend.gpu_powered) {
+ if (pm->poweroff_gpu_ticks) {
+ backend->gpu_poweroff_pending =
+ pm->poweroff_gpu_ticks;
+ backend->poweroff_timer_needed = true;
+ if (!backend->poweroff_timer_running) {
+ /* Start timer if not running (eg if
+ * power policy has been changed from
+ * always_on to something else). This
+ * will ensure the GPU is actually
+ * powered off */
+ backend->poweroff_timer_running
+ = true;
+ hrtimer_start(
+ &backend->gpu_poweroff_timer,
+ pm->gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+
+ /* Power off the GPU immediately */
+ kbase_pm_do_poweroff(kbdev, false);
+ }
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+ }
+}
+
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
+{
+ u64 desired_bitmap;
+ u64 desired_tiler_bitmap;
+ bool cores_are_available;
+ bool do_poweroff = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->pm.backend.pm_current_policy == NULL)
+ return;
+ if (kbdev->pm.backend.poweroff_wait_in_progress)
+ return;
+
+ if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
+ !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt) {
+ /* We are trying to change in/out of protected mode - force all
+ * cores off so that the L2 powers down */
+ desired_bitmap = 0;
+ desired_tiler_bitmap = 0;
+ } else {
+ desired_bitmap =
+ kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
+ desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+
+ if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
+ desired_tiler_bitmap = 1;
+ else
+ desired_tiler_bitmap = 0;
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
+ /* Unless XAFFINITY is supported, enable core 0 if tiler
+ * required, regardless of core availability */
+ if (kbdev->tiler_needed_cnt > 0 ||
+ kbdev->tiler_inuse_cnt > 0)
+ desired_bitmap |= 1;
+ }
+ }
+
+ if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
+ (u32)desired_bitmap);
+ /* Are any cores being powered on? */
+ if (~kbdev->pm.backend.desired_shader_state & desired_bitmap ||
+ ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap ||
+ kbdev->pm.backend.ca_in_transition) {
+ /* Check if we are powering off any cores before updating shader
+ * state */
+ if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
+ kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap) {
+ /* Start timer to power off cores */
+ kbdev->pm.backend.shader_poweroff_pending |=
+ (kbdev->pm.backend.desired_shader_state &
+ ~desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending |=
+ (kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap);
+
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
+ kbdev->pm.backend.shader_poweroff_pending_time =
+ kbdev->pm.poweroff_shader_ticks;
+ else
+ do_poweroff = true;
+ }
+
+ kbdev->pm.backend.desired_shader_state = desired_bitmap;
+ kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap;
+
+ /* If any cores are being powered on, transition immediately */
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
+ kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap) {
+ /* Start timer to power off cores */
+ kbdev->pm.backend.shader_poweroff_pending |=
+ (kbdev->pm.backend.desired_shader_state &
+ ~desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending |=
+ (kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap);
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
+ kbdev->pm.backend.shader_poweroff_pending_time =
+ kbdev->pm.poweroff_shader_ticks;
+ else
+ kbasep_pm_do_poweroff_cores(kbdev);
+ } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 &&
+ desired_tiler_bitmap != 0 &&
+ kbdev->pm.backend.poweroff_timer_needed) {
+ /* If power policy is keeping cores on despite there being no
+ * active contexts then disable poweroff timer as it isn't
+ * required.
+ * Only reset poweroff_timer_needed if we're not in the middle
+ * of the power off callback */
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ }
+
+ /* Ensure timer does not power off wanted cores and make sure to power
+ * off unwanted cores */
+ if (kbdev->pm.backend.shader_poweroff_pending ||
+ kbdev->pm.backend.tiler_poweroff_pending) {
+ kbdev->pm.backend.shader_poweroff_pending &=
+ ~(kbdev->pm.backend.desired_shader_state &
+ desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending &=
+ ~(kbdev->pm.backend.desired_tiler_state &
+ desired_tiler_bitmap);
+
+ if (!kbdev->pm.backend.shader_poweroff_pending &&
+ !kbdev->pm.backend.tiler_poweroff_pending)
+ kbdev->pm.backend.shader_poweroff_pending_time = 0;
+ }
+
+ /* Shader poweroff is deferred to the end of the function, to eliminate
+ * issues caused by the core availability policy recursing into this
+ * function */
+ if (do_poweroff)
+ kbasep_pm_do_poweroff_cores(kbdev);
+
+ /* Don't need 'cores_are_available', because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+}
+
+void kbase_pm_update_cores_state(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
+{
+ if (!list)
+ return POLICY_COUNT;
+
+ *list = policy_list;
+
+ return POLICY_COUNT;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
+
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return kbdev->pm.backend.pm_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
+
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_policy *new_policy)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ const struct kbase_pm_policy *old_policy;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+ KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);
+
+ /* During a policy change we pretend the GPU is active */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread */
+ kbase_pm_context_active(kbdev);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Remove the policy to prevent IRQ handlers from working on it */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ old_policy = kbdev->pm.backend.pm_current_policy;
+ kbdev->pm.backend.pm_current_policy = NULL;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
+ old_policy->id);
+ if (old_policy->term)
+ old_policy->term(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
+ new_policy->id);
+ if (new_policy->init)
+ new_policy->init(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.pm_current_policy = new_policy;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* If any core power state changes were previously attempted, but
+ * couldn't be made because the policy was changing (current_policy was
+ * NULL), then re-try them here. */
+ kbase_pm_update_active(kbdev);
+ kbase_pm_update_cores_state(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Now the policy change is finished, we release our fake context active
+ * reference */
+ kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
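+
+/*
+ * Illustrative sketch (not part of this file): a caller such as a sysfs
+ * handler can combine kbase_pm_list_policies() and kbase_pm_set_policy() to
+ * switch to a named policy. This assumes the policy descriptor exposes a
+ * human-readable 'name' field; 'name' below is the requested policy name.
+ *
+ *	const struct kbase_pm_policy *const *policies;
+ *	int i, n = kbase_pm_list_policies(&policies);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		if (!strcmp(policies[i]->name, name)) {
+ *			kbase_pm_set_policy(kbdev, policies[i]);
+ *			break;
+ *		}
+ *	}
+ */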
+
+/* Check whether a state change has finished, and trace it as completed */
+static void
+kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
+{
+ if ((kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state)
+ == kbdev->pm.backend.desired_shader_state &&
+ (kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state)
+ == kbdev->pm.backend.desired_tiler_state)
+ kbase_timeline_pm_check_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+}
+
+void kbase_pm_request_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ u64 cores;
+
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ cores = shader_cores;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+
+ /* It should be almost impossible for this to overflow. It would
+ * require 2^32 atoms to request a particular core, which would
+ * require 2^24 contexts to submit. This would require an amount
+ * of memory that is impossible on a 32-bit system and extremely
+ * unlikely on a 64-bit system. */
+ int cnt = ++kbdev->shader_needed_cnt[bitnum];
+
+ if (1 == cnt) {
+ kbdev->shader_needed_bitmap |= bit;
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt = ++kbdev->tiler_needed_cnt;
+
+ if (1 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_START,
+ change_gpu_state);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_END,
+ change_gpu_state);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
+
+void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_needed_cnt[bitnum];
+
+ if (0 == cnt) {
+ kbdev->shader_needed_bitmap &= ~bit;
+
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);
+
+ cnt = --kbdev->tiler_needed_cnt;
+
+ if (0 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* Trace that any state change effectively completes immediately
+ * - no-one will wait on the state change */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
+
+enum kbase_pm_cores_ready
+kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ u64 prev_shader_needed; /* Just for tracing */
+ u64 prev_shader_inuse; /* Just for tracing */
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ prev_shader_needed = kbdev->shader_needed_bitmap;
+ prev_shader_inuse = kbdev->shader_inuse_bitmap;
+
+ /* If desired_shader_state does not contain the requested cores, then
+	 * power management is not attempting to power those cores (most
+ * likely due to core availability policy) and a new job affinity must
+ * be chosen */
+ if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
+ shader_cores) {
+ return (kbdev->pm.backend.poweroff_wait_in_progress ||
+ kbdev->pm.backend.pm_current_policy == NULL) ?
+ KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
+ }
+
+ if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
+ (tiler_required && !kbdev->tiler_available_bitmap)) {
+ /* Trace ongoing core transition */
+ kbase_timeline_pm_l2_transition_start(kbdev);
+ return KBASE_CORES_NOT_READY;
+ }
+
+ /* If we started to trace a state change, then trace it has being
+ * finished by now, at the very latest */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ /* Trace core transition done */
+ kbase_timeline_pm_l2_transition_done(kbdev);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_needed_cnt[bitnum];
+
+ if (0 == cnt)
+ kbdev->shader_needed_bitmap &= ~bit;
+
+ /* shader_inuse_cnt should not overflow because there can only
+ * be a very limited number of jobs on the h/w at one time */
+
+ kbdev->shader_inuse_cnt[bitnum]++;
+ kbdev->shader_inuse_bitmap |= bit;
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);
+
+ --kbdev->tiler_needed_cnt;
+
+ kbdev->tiler_inuse_cnt++;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0);
+ }
+
+ if (prev_shader_needed != kbdev->shader_needed_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ if (prev_shader_inuse != kbdev->shader_inuse_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
+ NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+
+ return KBASE_CORES_READY;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
+
+void kbase_pm_release_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_inuse_cnt[bitnum];
+
+ if (0 == cnt) {
+ kbdev->shader_inuse_bitmap &= ~bit;
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0);
+
+ cnt = --kbdev->tiler_inuse_cnt;
+
+ if (0 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL,
+ NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_START,
+ change_gpu_state);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_END,
+ change_gpu_state);
+
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
+
+void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
+ bool tiler_required,
+ u64 shader_cores)
+{
+ unsigned long flags;
+
+ kbase_pm_wait_for_poweroff_complete(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_check_transitions_sync(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync);
+
+void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ u32 prior_l2_users_count;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ prior_l2_users_count = kbdev->l2_users_count++;
+
+ KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);
+
+ /* if the GPU is reset while the l2 is on, l2 will be off but
+ * prior_l2_users_count will be > 0. l2_available_bitmap will have been
+ * set to 0 though by kbase_pm_init_hw */
+ if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
+ kbase_pm_check_transitions_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ wait_event(kbdev->pm.backend.l2_powered_wait,
+ kbdev->pm.backend.l2_powered == 1);
+
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
+
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->l2_users_count++;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
+
+void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);
+
+ --kbdev->l2_users_count;
+
+ if (!kbdev->l2_users_count) {
+ kbase_pm_check_transitions_nolock(kbdev);
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.h
new file mode 100644
index 000000000000..611a90e66e65
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_pm_policy.h
@@ -0,0 +1,227 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Power policy API definitions
+ */
+
+#ifndef _KBASE_PM_POLICY_H_
+#define _KBASE_PM_POLICY_H_
+
+/**
+ * kbase_pm_policy_init - Initialize power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Must be called before calling any other policy function
+ *
+ * Return: 0 if the power policy framework was successfully
+ * initialized, -errno otherwise.
+ */
+int kbase_pm_policy_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_policy_term - Terminate power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_policy_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_active - Update the active power state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores - Update the desired core state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_cores(struct kbase_device *kbdev);
+
+
+enum kbase_pm_cores_ready {
+ KBASE_CORES_NOT_READY = 0,
+ KBASE_NEW_AFFINITY = 1,
+ KBASE_CORES_READY = 2
+};
+
+
+/**
+ * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores()
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores which are necessary for the job
+ *
+ * When this function returns, the @shader_cores will be in the READY state.
+ *
+ * This is a safe variant of kbase_pm_check_transitions_sync(): it handles the
+ * work of ensuring the requested cores will remain powered until a matching
+ * call to kbase_pm_unrequest_cores()/kbase_pm_release_cores() (as appropriate)
+ * is made.
+ */
+void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_request_cores - Mark one or more cores as being required
+ * for jobs to be submitted
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores which are necessary for the job
+ *
+ * This function is called by the job scheduler to mark one or more cores as
+ * being required to submit jobs that are ready to run.
+ *
+ * The cores requested are reference counted and a subsequent call to
+ * kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be
+ * made to drop the 'needed' reference on the cores.
+ *
+ * The active power policy will meet or exceed the requirements of the
+ * requested cores in the system. Any core transitions needed will be begun
+ * immediately, but they might not complete (and the cores might not be
+ * available) until a Power Management IRQ.
+ */
+void kbase_pm_request_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_unrequest_cores - Unmark one or more cores as being required for
+ * jobs to be submitted.
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_request_cores() )
+ *
+ * This function undoes the effect of kbase_pm_request_cores(). It should be
+ * used when a job is not going to be submitted to the hardware (e.g. the job is
+ * cancelled before it is enqueued).
+ *
+ * The active power policy will meet or exceed the requirements of the
+ * requested cores in the system. Any core transitions needed will be begun
+ * immediately, but they might not complete until a Power Management IRQ.
+ *
+ * The policy may use this as an indication that it can power down cores.
+ */
+void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_register_inuse_cores - Register a set of cores as in use by a job
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_request_cores() )
+ *
+ * This function should be called after kbase_pm_request_cores() when the job
+ * is about to be submitted to the hardware. It will check that the necessary
+ * cores are available and if so update the 'needed' and 'inuse' bitmasks to
+ * reflect that the job is now committed to being run.
+ *
+ * If the necessary cores are not currently available then the function will
+ * return %KBASE_CORES_NOT_READY and have no effect.
+ *
+ * Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready,
+ *
+ * %KBASE_NEW_AFFINITY if the affinity requested is not allowed,
+ *
+ * %KBASE_CORES_READY if the cores requested are already available
+ */
+enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(
+ struct kbase_device *kbdev,
+ bool tiler_required,
+ u64 shader_cores);
+
+/**
+ * kbase_pm_release_cores - Release cores after a job has run
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_register_inuse_cores() )
+ *
+ * This function should be called when a job has finished running on the
+ * hardware. A call to kbase_pm_register_inuse_cores() must have previously
+ * occurred. The reference counts of the specified cores will be decremented
+ * which may cause the bitmask of 'inuse' cores to be reduced. The power policy
+ * may then turn off any cores which are no longer 'inuse'.
+ */
+void kbase_pm_release_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
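+
+/*
+ * Illustrative lifecycle sketch (not part of the driver): the job scheduler
+ * is expected to pair the calls above roughly as follows, with
+ * kbdev->hwaccess_lock held and 'affinity' being the shader core mask chosen
+ * for the atom:
+ *
+ *	kbase_pm_request_cores(kbdev, true, affinity);
+ *	...
+ *	if (kbase_pm_register_inuse_cores(kbdev, true, affinity) ==
+ *			KBASE_CORES_READY) {
+ *		... submit the job; once it has finished ...
+ *		kbase_pm_release_cores(kbdev, true, affinity);
+ *	}
+ */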
+
+/**
+ * kbase_pm_request_l2_caches - Request l2 caches
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Request the use of the l2 caches for all core groups: power them up, wait
+ * for the power-up to complete, and prevent the power manager from powering
+ * the l2 caches back down.
+ *
+ * This tells the power management that the caches should be powered up, and
+ * they should remain powered, irrespective of the usage of shader cores. This
+ * does not return until the l2 caches are powered up.
+ *
+ * The caller must call kbase_pm_release_l2_caches() when they are finished
+ * to allow normal power management of the l2 caches to resume.
+ *
+ * This should only be used when power management is active.
+ */
+void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Increment the count of l2 users but do not attempt to power on the l2.
+ *
+ * It is the caller's responsibility to ensure that the l2 is already powered
+ * up and to eventually call kbase_pm_release_l2_caches().
+ */
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_l2_caches - Release l2 caches
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Release the use of l2 caches for all core groups and allow the power manager
+ * to power them down when necessary.
+ *
+ * This tells the power management that the caches can be powered down if
+ * necessary, with respect to the usage of shader cores.
+ *
+ * The caller must have called kbase_pm_request_l2_caches() prior to a call
+ * to this.
+ *
+ * This should only be used when power management is active.
+ */
+void kbase_pm_release_l2_caches(struct kbase_device *kbdev);
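+
+/*
+ * Illustrative pairing sketch (not part of the driver): a caller that needs
+ * the l2 held on, for example while dumping hardware counters, brackets the
+ * access as below. kbase_pm_request_l2_caches() takes hwaccess_lock itself
+ * and may sleep, whereas kbase_pm_release_l2_caches() must be called with
+ * hwaccess_lock held.
+ *
+ *	kbase_pm_request_l2_caches(kbdev);
+ *	... access state that requires the l2 to be powered ...
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	kbase_pm_release_l2_caches(kbdev);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */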
+
+#endif /* _KBASE_PM_POLICY_H_ */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.c
new file mode 100644
index 000000000000..d992989123e8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.c
@@ -0,0 +1,103 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts)
+{
+ u32 hi1, hi2;
+
+ kbase_pm_request_gpu_cycle_counter(kbdev);
+
+ /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+ * correctly */
+ do {
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
+ NULL);
+ *cycle_counter = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
+ NULL);
+ *cycle_counter |= (((u64) hi1) << 32);
+ } while (hi1 != hi2);
+
+ /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+ * correctly */
+ do {
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
+ NULL);
+ *system_time = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
+ NULL);
+ *system_time |= (((u64) hi1) << 32);
+ } while (hi1 != hi2);
+
+ /* Record the CPU's idea of current time */
+ getrawmonotonic(ts);
+
+ kbase_pm_release_gpu_cycle_counter(kbdev);
+}
+
+/**
+ * kbase_wait_write_flush - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * Only in use for BASE_HW_ISSUE_6367
+ *
+ * Note: If a GPU reset occurs then the counters are reset to zero and the
+ * delay may not be as expected.
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_context *kctx)
+{
+ u32 base_count = 0;
+
+ /*
+ * The caller must be holding onto the kctx or the call is from
+ * userspace.
+ */
+ kbase_pm_context_active(kctx->kbdev);
+ kbase_pm_request_gpu_cycle_counter(kctx->kbdev);
+
+ while (true) {
+ u32 new_count;
+
+ new_count = kbase_reg_read(kctx->kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ /* First time around, just store the count. */
+ if (base_count == 0) {
+ base_count = new_count;
+ continue;
+ }
+
+ /* No need to handle wrapping, unsigned maths works for this. */
+ if ((new_count - base_count) > 1000)
+ break;
+ }
+
+ kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
+ kbase_pm_context_idle(kctx->kbdev);
+}
+#endif /* CONFIG_MALI_NO_MALI */
diff --git a/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.h b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.h
new file mode 100644
index 000000000000..35088abc8fe5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/backend/gpu/mali_kbase_time.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev: Device pointer
+ * @cycle_counter: Pointer to u64 to store cycle counter in
+ * @system_time: Pointer to u64 to store system time in
+ * @ts: Pointer to struct timespec to store current monotonic
+ * time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts);
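+
+/*
+ * Illustrative usage sketch (not part of the driver):
+ *
+ *	u64 cycles, sys_time;
+ *	struct timespec ts;
+ *
+ *	kbase_backend_get_gpu_time(kbdev, &cycles, &sys_time, &ts);
+ */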
+
+/**
+ * kbase_wait_write_flush() - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs, the counters are reset to zero and the delay may not
+ * be as expected.
+ *
+ * This function is only used for BASE_HW_ISSUE_6367.
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static inline void kbase_wait_write_flush(struct kbase_context *kctx)
+{
+}
+#else
+void kbase_wait_write_flush(struct kbase_context *kctx);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
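The "unsigned maths" remark in kbase_wait_write_flush() relies on 2^32 modular arithmetic: subtracting two u32 samples gives the elapsed cycle count even if the counter wrapped in between (as long as fewer than 2^32 cycles elapsed). A minimal sketch with made-up sample values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0xFFFFFF00u;   /* sampled just before a 32-bit wrap */
	uint32_t now  = 0x00000300u;   /* counter has since wrapped */

	/* Modular subtraction: 0x300 - 0xFFFFFF00 == 0x400, i.e. 1024 cycles. */
	assert((uint32_t)(now - base) == 0x400u);
	assert((uint32_t)(now - base) > 1000u);
	return 0;
}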
diff --git a/drivers/gpu/arm_gpu/docs/Doxyfile b/drivers/gpu/arm_gpu/docs/Doxyfile
new file mode 100644
index 000000000000..35ff2f1ce4a0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/docs/Doxyfile
@@ -0,0 +1,126 @@
+#
+# (C) COPYRIGHT 2011-2013, 2015 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+##############################################################################
+
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += ../../kernel/drivers/gpu/arm/midgard/
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+EXCLUDE += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/pm_test_script.sh ../../kernel/drivers/gpu/arm/midgard/mali_uk.h
+
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH +=
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS += ../../kernel/drivers/gpu/arm/midgard/docs
+
diff --git a/drivers/gpu/arm_gpu/docs/policy_operation_diagram.dot b/drivers/gpu/arm_gpu/docs/policy_operation_diagram.dot
new file mode 100644
index 000000000000..7ae05c2f8ded
--- /dev/null
+++ b/drivers/gpu/arm_gpu/docs/policy_operation_diagram.dot
@@ -0,0 +1,112 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+digraph policy_objects_diagram {
+ rankdir=LR;
+ size="12,8";
+ compound=true;
+
+ node [ shape = box ];
+
+ subgraph cluster_policy_queues {
+ low_queue [ shape=record label = "LowP | {<ql>ctx_lo | ... | <qm>ctx_i | ... | <qr>ctx_hi}" ];
+ queues_middle_sep [ label="" shape=plaintext width=0 height=0 ];
+
+ rt_queue [ shape=record label = "RT | {<ql>ctx_lo | ... | <qm>ctx_j | ... | <qr>ctx_hi}" ];
+
+ label = "Policy's Queue(s)";
+ }
+
+ call_enqueue [ shape=plaintext label="enqueue_ctx()" ];
+
+ {
+ rank=same;
+ ordering=out;
+ call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ];
+ call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ];
+
+ call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ];
+ }
+
+ subgraph cluster_runpool {
+
+ as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ];
+ as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ];
+ as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ];
+ as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ];
+
+ label = "Policy's Run Pool";
+ }
+
+ {
+ rank=same;
+ call_jdequeue [ shape=plaintext label="dequeue_job()" ];
+ sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ];
+ }
+
+ {
+ rank=same;
+ ordering=out;
+ sstop [ shape=ellipse label="SS-Timer expires" ]
+ jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+ irq [ label="IRQ" shape=ellipse ];
+
+ job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ];
+ }
+
+ hstop [ shape=ellipse label="HS-Timer expires" ]
+
+ /*
+ * Edges
+ */
+
+ call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ];
+
+ low_queue:qr -> call_dequeue:w;
+ rt_queue:qr -> call_dequeue:w;
+
+ call_dequeue -> as1 [lhead=cluster_runpool];
+
+ as1->call_jdequeue [ltail=cluster_runpool];
+ call_jdequeue->jobslots:0;
+ call_jdequeue->sstop_dotfixup [ arrowhead=none];
+ sstop_dotfixup->sstop [label="Spawn SS-Timer"];
+ sstop->jobslots [label="SoftStop"];
+ sstop->hstop [label="Spawn HS-Timer"];
+ hstop->jobslots:ne [label="HardStop"];
+
+
+ as3->call_ctxfinish:ne [ ltail=cluster_runpool ];
+ call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ];
+
+ call_ctxfinish->call_ctxdone [constraint=false];
+
+ call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false];
+
+
+ {
+ jobslots->irq [constraint=false];
+
+ irq->job_finish [constraint=false];
+ }
+
+ irq->as2 [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ];
+
+}
diff --git a/drivers/gpu/arm_gpu/docs/policy_overview.dot b/drivers/gpu/arm_gpu/docs/policy_overview.dot
new file mode 100644
index 000000000000..159b993b7d61
--- /dev/null
+++ b/drivers/gpu/arm_gpu/docs/policy_overview.dot
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+digraph policy_objects_diagram {
+ rankdir=LR
+ size="6,6"
+ compound=true;
+
+ node [ shape = box ];
+
+ call_enqueue [ shape=plaintext label="enqueue ctx" ];
+
+
+ policy_queue [ label="Policy's Queue" ];
+
+ {
+ rank=same;
+ runpool [ label="Policy's Run Pool" ];
+
+ ctx_finish [ label="ctx finished" ];
+ }
+
+ {
+ rank=same;
+ jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+ job_finish [ label="Job finished" ];
+ }
+
+
+
+ /*
+ * Edges
+ */
+
+ call_enqueue -> policy_queue;
+
+ policy_queue->runpool [label="dequeue ctx" weight=0.1];
+ runpool->policy_queue [label="requeue ctx" weight=0.1];
+
+ runpool->ctx_finish [ style=dotted ];
+
+ runpool->jobslots [label="dequeue job" weight=0.1];
+ jobslots->runpool [label="requeue job" weight=0.1];
+
+ jobslots->job_finish [ style=dotted ];
+}
diff --git a/drivers/gpu/arm_gpu/ipa/Kbuild b/drivers/gpu/arm_gpu/ipa/Kbuild
new file mode 100644
index 000000000000..ddf1c1a614e7
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+mali_kbase-y += \
+ ipa/mali_kbase_ipa_simple.o \
+ ipa/mali_kbase_ipa.o
+
+mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o
+
+mali_kbase-y += \
+ ipa/mali_kbase_ipa_vinstr_g71.o \
+ ipa/mali_kbase_ipa_vinstr_common.o
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.c b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.c
new file mode 100644
index 000000000000..ecc06da5516e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.c
@@ -0,0 +1,585 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <linux/thermal.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/of.h>
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+#include "mali_kbase_ipa_simple.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#define dev_pm_opp_find_freq_exact opp_find_freq_exact
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp opp
+#endif
+
+#define KBASE_IPA_FALLBACK_MODEL_NAME "mali-simple-power-model"
+#define KBASE_IPA_G71_MODEL_NAME "mali-g71-power-model"
+
+static struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = {
+ &kbase_simple_ipa_model_ops,
+ &kbase_g71_ipa_model_ops
+};
+
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model)
+{
+ int err = 0;
+
+ lockdep_assert_held(&model->kbdev->ipa.lock);
+
+ if (model->ops->recalculate) {
+ err = model->ops->recalculate(model);
+ if (err) {
+ dev_err(model->kbdev->dev,
+ "recalculation of power model %s returned error %d\n",
+ model->ops->name, err);
+ }
+ }
+
+ return err;
+}
+
+static struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbase_ipa_all_model_ops); ++i) {
+ struct kbase_ipa_model_ops *ops = kbase_ipa_all_model_ops[i];
+
+ if (!strcmp(ops->name, name))
+ return ops;
+ }
+
+ dev_err(kbdev->dev, "power model \'%s\' not found\n", name);
+
+ return NULL;
+}
+
+void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev)
+{
+ atomic_set(&kbdev->ipa_use_configured_model, false);
+}
+
+void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev)
+{
+ atomic_set(&kbdev->ipa_use_configured_model, true);
+}
+
+const char *kbase_ipa_model_name_from_id(u32 gpu_id)
+{
+ const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ if (GPU_ID_IS_NEW_FORMAT(prod_id)) {
+ switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
+ case GPU_ID2_PRODUCT_TMIX:
+ return KBASE_IPA_G71_MODEL_NAME;
+ default:
+ return KBASE_IPA_FALLBACK_MODEL_NAME;
+ }
+ }
+
+ return KBASE_IPA_FALLBACK_MODEL_NAME;
+}
+
+static struct device_node *get_model_dt_node(struct kbase_ipa_model *model)
+{
+ struct device_node *model_dt_node;
+ char compat_string[64];
+
+ snprintf(compat_string, sizeof(compat_string), "arm,%s",
+ model->ops->name);
+
+ of_node_get(model->kbdev->dev->of_node);
+ model_dt_node = of_find_compatible_node(model->kbdev->dev->of_node,
+ NULL, compat_string);
+ if (!model_dt_node && !model->missing_dt_node_warning) {
+ dev_warn(model->kbdev->dev,
+ "Couldn't find power_model DT node matching \'%s\'\n",
+ compat_string);
+ model->missing_dt_node_warning = true;
+ }
+
+ return model_dt_node;
+}
+
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+ const char *name, s32 *addr,
+ size_t num_elems, bool dt_required)
+{
+ int err, i;
+ struct device_node *model_dt_node = get_model_dt_node(model);
+ char *origin;
+
+ err = of_property_read_u32_array(model_dt_node, name, addr, num_elems);
+
+ if (err && dt_required) {
+ memset(addr, 0, sizeof(s32) * num_elems);
+ dev_warn(model->kbdev->dev,
+ "Error %d, no DT entry: %s.%s = %zu*[0]\n",
+ err, model->ops->name, name, num_elems);
+ origin = "zero";
+ } else if (err && !dt_required) {
+ origin = "default";
+ } else /* !err */ {
+ origin = "DT";
+ }
+
+ /* Create a unique debugfs entry for each element */
+ for (i = 0; i < num_elems; ++i) {
+ char elem_name[32];
+
+ if (num_elems == 1)
+ snprintf(elem_name, sizeof(elem_name), "%s", name);
+ else
+ snprintf(elem_name, sizeof(elem_name), "%s.%d",
+ name, i);
+
+ dev_dbg(model->kbdev->dev, "%s.%s = %d (%s)\n",
+ model->ops->name, elem_name, addr[i], origin);
+
+ err = kbase_ipa_model_param_add(model, elem_name,
+ &addr[i], sizeof(s32),
+ PARAM_TYPE_S32);
+ if (err)
+ goto exit;
+ }
+exit:
+ return err;
+}
+
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+ const char *name, char *addr,
+ size_t size, bool dt_required)
+{
+ int err;
+ struct device_node *model_dt_node = get_model_dt_node(model);
+ const char *string_prop_value;
+ char *origin;
+
+ err = of_property_read_string(model_dt_node, name,
+ &string_prop_value);
+ if (err && dt_required) {
+ strncpy(addr, "", size - 1);
+ dev_warn(model->kbdev->dev,
+ "Error %d, no DT entry: %s.%s = \'%s\'\n",
+ err, model->ops->name, name, addr);
+ err = 0;
+ origin = "zero";
+ } else if (err && !dt_required) {
+ origin = "default";
+ } else /* !err */ {
+ strncpy(addr, string_prop_value, size - 1);
+ origin = "DT";
+ }
+
+ addr[size - 1] = '\0';
+
+ dev_dbg(model->kbdev->dev, "%s.%s = \'%s\' (%s)\n",
+ model->ops->name, name, addr, origin);
+
+ err = kbase_ipa_model_param_add(model, name, addr, size,
+ PARAM_TYPE_STRING);
+
+ return err;
+}
+
+void kbase_ipa_term_model(struct kbase_ipa_model *model)
+{
+ if (!model)
+ return;
+
+ lockdep_assert_held(&model->kbdev->ipa.lock);
+
+ if (model->ops->term)
+ model->ops->term(model);
+
+ kbase_ipa_model_param_free_all(model);
+
+ kfree(model);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term_model);
+
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+ struct kbase_ipa_model_ops *ops)
+{
+ struct kbase_ipa_model *model;
+ int err;
+
+ lockdep_assert_held(&kbdev->ipa.lock);
+
+ if (!ops || !ops->name)
+ return NULL;
+
+ model = kzalloc(sizeof(struct kbase_ipa_model), GFP_KERNEL);
+ if (!model)
+ return NULL;
+
+ model->kbdev = kbdev;
+ model->ops = ops;
+ INIT_LIST_HEAD(&model->params);
+
+ err = model->ops->init(model);
+ if (err) {
+ dev_err(kbdev->dev,
+ "init of power model \'%s\' returned error %d\n",
+ ops->name, err);
+ goto term_model;
+ }
+
+ err = kbase_ipa_model_recalculate(model);
+ if (err)
+ goto term_model;
+
+ return model;
+
+term_model:
+ kbase_ipa_term_model(model);
+ return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init_model);
+
+static void kbase_ipa_term_locked(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->ipa.lock);
+
+ /* Clean up the models */
+ if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+ kbase_ipa_term_model(kbdev->ipa.configured_model);
+ kbase_ipa_term_model(kbdev->ipa.fallback_model);
+
+ kbdev->ipa.configured_model = NULL;
+ kbdev->ipa.fallback_model = NULL;
+}
+
+int kbase_ipa_init(struct kbase_device *kbdev)
+{
+
+ const char *model_name;
+ struct kbase_ipa_model_ops *ops;
+ struct kbase_ipa_model *default_model = NULL;
+ int err;
+
+ mutex_init(&kbdev->ipa.lock);
+ /*
+ * Lock during init to avoid warnings from lockdep_assert_held (there
+ * shouldn't be any concurrent access yet).
+ */
+ mutex_lock(&kbdev->ipa.lock);
+
+ /* The simple IPA model must *always* be present.*/
+ ops = kbase_ipa_model_ops_find(kbdev, KBASE_IPA_FALLBACK_MODEL_NAME);
+
+ if (!ops->do_utilization_scaling_in_framework) {
+ dev_err(kbdev->dev,
+ "Fallback IPA model %s should not account for utilization\n",
+ ops->name);
+ err = -EINVAL;
+ goto end;
+ }
+
+ default_model = kbase_ipa_init_model(kbdev, ops);
+ if (!default_model) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ kbdev->ipa.fallback_model = default_model;
+ err = of_property_read_string(kbdev->dev->of_node,
+ "ipa-model",
+ &model_name);
+ if (err) {
+ /* Attempt to load a match from GPU-ID */
+ u32 gpu_id;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ model_name = kbase_ipa_model_name_from_id(gpu_id);
+ dev_dbg(kbdev->dev,
+ "Inferring model from GPU ID 0x%x: \'%s\'\n",
+ gpu_id, model_name);
+ err = 0;
+ } else {
+ dev_dbg(kbdev->dev,
+ "Using ipa-model parameter from DT: \'%s\'\n",
+ model_name);
+ }
+
+ if (strcmp(KBASE_IPA_FALLBACK_MODEL_NAME, model_name) != 0) {
+ ops = kbase_ipa_model_ops_find(kbdev, model_name);
+ kbdev->ipa.configured_model = kbase_ipa_init_model(kbdev, ops);
+ if (!kbdev->ipa.configured_model) {
+ err = -EINVAL;
+ goto end;
+ }
+ } else {
+ kbdev->ipa.configured_model = default_model;
+ }
+
+ kbase_ipa_model_use_configured_locked(kbdev);
+
+end:
+ if (err)
+ kbase_ipa_term_locked(kbdev);
+ else
+ dev_info(kbdev->dev,
+ "Using configured power model %s, and fallback %s\n",
+ kbdev->ipa.configured_model->ops->name,
+ kbdev->ipa.fallback_model->ops->name);
+
+ mutex_unlock(&kbdev->ipa.lock);
+ return err;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init);
+
+void kbase_ipa_term(struct kbase_device *kbdev)
+{
+ mutex_lock(&kbdev->ipa.lock);
+ kbase_ipa_term_locked(kbdev);
+ mutex_unlock(&kbdev->ipa.lock);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term);
+
+/**
+ * kbase_scale_dynamic_power() - Scale a dynamic power coefficient to an OPP
+ * @c: Dynamic model coefficient, in pW/(Hz V^2). Should be in range
+ * 0 < c < 2^26 to prevent overflow.
+ * @freq: Frequency, in Hz. Range: 2^23 < freq < 2^30 (~8MHz to ~1GHz)
+ * @voltage: Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Keep a record of the approximate range of each value at every stage of the
+ * calculation, to ensure we don't overflow. This makes heavy use of the
+ * approximations 1000 = 2^10 and 1000000 = 2^20, but does the actual
+ * calculations in decimal for increased accuracy.
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+static u32 kbase_scale_dynamic_power(const u32 c, const u32 freq,
+ const u32 voltage)
+{
+ /* Range: 2^8 < v2 < 2^16 m(V^2) */
+ const u32 v2 = (voltage * voltage) / 1000;
+
+ /* Range: 2^3 < f_MHz < 2^10 MHz */
+ const u32 f_MHz = freq / 1000000;
+
+ /* Range: 2^11 < v2f_big < 2^26 kHz V^2 */
+ const u32 v2f_big = v2 * f_MHz;
+
+ /* Range: 2^1 < v2f < 2^16 MHz V^2 */
+ const u32 v2f = v2f_big / 1000;
+
+ /* Range (working backwards from next line): 0 < v2fc < 2^23 uW.
+ * Must be < 2^42 to avoid overflowing the return value. */
+ const u64 v2fc = (u64) c * (u64) v2f;
+
+ /* Range: 0 < v2fc / 1000 < 2^13 mW */
+ return v2fc / 1000;
+}
+
+/**
+ * kbase_scale_static_power() - Scale a static power coefficient to an OPP
+ * @c: Static model coefficient, in uW/V^3. Should be in range
+ * 0 < c < 2^32 to prevent overflow.
+ * @voltage: Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+u32 kbase_scale_static_power(const u32 c, const u32 voltage)
+{
+ /* Range: 2^8 < v2 < 2^16 m(V^2) */
+ const u32 v2 = (voltage * voltage) / 1000;
+
+ /* Range: 2^17 < v3_big < 2^29 m(V^2) mV */
+ const u32 v3_big = v2 * voltage;
+
+ /* Range: 2^7 < v3 < 2^19 m(V^3) */
+ const u32 v3 = v3_big / 1000;
+
+ /*
+ * Range (working backwards from next line): 0 < v3c_big < 2^33 nW.
+ * The result should be < 2^52 to avoid overflowing the return value.
+ */
+ const u64 v3c_big = (u64) c * (u64) v3;
+
+ /* Range: 0 < v3c_big / 1000000 < 2^13 mW */
+ return v3c_big / 1000000;
+}
+
+static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->ipa.lock);
+
+ if (atomic_read(&kbdev->ipa_use_configured_model))
+ return kbdev->ipa.configured_model;
+ else
+ return kbdev->ipa.fallback_model;
+}
+
+static u32 get_static_power_locked(struct kbase_device *kbdev,
+ struct kbase_ipa_model *model,
+ unsigned long voltage)
+{
+ u32 power = 0;
+ int err;
+ u32 power_coeff;
+
+ lockdep_assert_held(&model->kbdev->ipa.lock);
+
+ if (!model->ops->get_static_coeff)
+ model = kbdev->ipa.fallback_model;
+
+ if (model->ops->get_static_coeff) {
+ err = model->ops->get_static_coeff(model, &power_coeff);
+ if (!err)
+ power = kbase_scale_static_power(power_coeff,
+ (u32) voltage);
+ }
+
+ return power;
+}
+
+#ifdef CONFIG_MALI_PWRSOFT_765
+static unsigned long kbase_get_static_power(struct devfreq *df,
+ unsigned long voltage)
+#else
+static unsigned long kbase_get_static_power(unsigned long voltage)
+#endif
+{
+ struct kbase_ipa_model *model;
+ u32 power = 0;
+#ifdef CONFIG_MALI_PWRSOFT_765
+ struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+ struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+ mutex_lock(&kbdev->ipa.lock);
+
+ model = get_current_model(kbdev);
+ power = get_static_power_locked(kbdev, model, voltage);
+
+ mutex_unlock(&kbdev->ipa.lock);
+
+#ifndef CONFIG_MALI_PWRSOFT_765
+ kbase_release_device(kbdev);
+#endif
+
+ return power;
+}
+
+#ifdef CONFIG_MALI_PWRSOFT_765
+static unsigned long kbase_get_dynamic_power(struct devfreq *df,
+ unsigned long freq,
+ unsigned long voltage)
+#else
+static unsigned long kbase_get_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+#endif
+{
+ struct kbase_ipa_model *model;
+ u32 power_coeff = 0, power = 0;
+ int err = 0;
+#ifdef CONFIG_MALI_PWRSOFT_765
+ struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+ struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+ mutex_lock(&kbdev->ipa.lock);
+
+ model = kbdev->ipa.fallback_model;
+
+ err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+
+ if (!err)
+ power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+ else
+ dev_err_ratelimited(kbdev->dev,
+ "Model %s returned error code %d\n",
+ model->ops->name, err);
+
+ mutex_unlock(&kbdev->ipa.lock);
+
+#ifndef CONFIG_MALI_PWRSOFT_765
+ kbase_release_device(kbdev);
+#endif
+
+ return power;
+}
+
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+ unsigned long freq,
+ unsigned long voltage)
+{
+ struct kbase_ipa_model *model;
+ u32 power_coeff = 0;
+ int err = 0;
+ struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+
+ mutex_lock(&kbdev->ipa.lock);
+
+ model = get_current_model(kbdev);
+
+ err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+
+ /* If we switch to protected mode between get_current_model() and
+ * get_dynamic_coeff(), counter reading could fail. If that happens
+ * (unlikely, but possible), revert to the fallback model. */
+ if (err && model != kbdev->ipa.fallback_model) {
+ model = kbdev->ipa.fallback_model;
+ err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+ }
+
+ if (err)
+ goto exit_unlock;
+
+ *power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+
+ if (model->ops->do_utilization_scaling_in_framework) {
+ struct devfreq_dev_status *status = &df->last_status;
+ unsigned long total_time = max(status->total_time, 1ul);
+ u64 busy_time = min(status->busy_time, total_time);
+
+ *power = ((u64) *power * (u64) busy_time) / total_time;
+ }
+
+ *power += get_static_power_locked(kbdev, model, voltage);
+
+exit_unlock:
+ mutex_unlock(&kbdev->ipa.lock);
+
+ return err;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+struct devfreq_cooling_ops kbase_ipa_power_model_ops = {
+#else
+struct devfreq_cooling_power kbase_ipa_power_model_ops = {
+#endif
+ .get_static_power = &kbase_get_static_power,
+ .get_dynamic_power = &kbase_get_dynamic_power,
+#ifdef CONFIG_MALI_PWRSOFT_765
+ .get_real_power = &kbase_get_real_power,
+#endif
+};
+KBASE_EXPORT_TEST_API(kbase_ipa_power_model_ops);
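To make the unit bookkeeping in kbase_scale_dynamic_power() concrete, the sketch below reproduces the same fixed-point steps in userspace with example inputs. The coefficient, frequency and voltage are illustrative only; analytically c*f*V^2 = 5000 pW/(Hz V^2) * 400 MHz * (0.75 V)^2 is about 1.125 W, and the integer version prints 1120 mW because each /1000 step rounds down.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the arithmetic of kbase_scale_dynamic_power(); not kbase API. */
static uint32_t scale_dynamic_power(uint32_t c, uint32_t freq, uint32_t mv)
{
	uint32_t v2    = (mv * mv) / 1000;      /* m(V^2)   */
	uint32_t f_mhz = freq / 1000000;        /* MHz      */
	uint32_t v2f   = (v2 * f_mhz) / 1000;   /* MHz V^2  */
	uint64_t v2fc  = (uint64_t)c * v2f;     /* uW       */

	return (uint32_t)(v2fc / 1000);         /* mW       */
}

int main(void)
{
	/* c = 5000 pW/(Hz V^2), 400 MHz, 750 mV -> ~1125 mW analytically */
	printf("%u mW\n", scale_dynamic_power(5000, 400000000, 750));
	return 0;
}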
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.h b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.h
new file mode 100644
index 000000000000..469f33cbdcc6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa.h
@@ -0,0 +1,165 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_IPA_H_
+#define _KBASE_IPA_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+struct devfreq;
+
+struct kbase_ipa_model {
+ struct list_head link;
+ struct kbase_device *kbdev;
+ void *model_data;
+ struct kbase_ipa_model_ops *ops;
+ struct list_head params;
+ bool missing_dt_node_warning;
+};
+
+/**
+ * kbase_ipa_model_add_param_s32 - Add an integer model parameter
+ * @model: pointer to IPA model
+ * @name: name of corresponding debugfs entry
+ * @addr: address where the value is stored
+ * @num_elems: number of elements (1 if not an array)
+ * @dt_required: if false, a missing devicetree entry is not an error and the
+ *               current value is used as the default. If true and the entry is
+ *               missing, a warning is output and the value is zeroed
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+ const char *name, s32 *addr,
+ size_t num_elems, bool dt_required);
+
+/**
+ * kbase_ipa_model_add_param_string - Add a string model parameter
+ * @model: pointer to IPA model
+ * @name: name of corresponding debugfs entry
+ * @addr: address where the value is stored
+ * @size: size, in bytes, of the value storage (so the maximum string
+ * length is size - 1)
+ * @dt_required: if false, a missing devicetree entry is not an error and the
+ *               current value is used as the default. If true and the entry is
+ *               missing, a warning is output and the value is set to an empty
+ *               string
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+ const char *name, char *addr,
+ size_t size, bool dt_required);
+
+struct kbase_ipa_model_ops {
+ char *name;
+ /* The init, recalculate and term ops on the default model are always
+ * called. However, all the other models are only invoked if the model
+ * is selected in the device tree. Otherwise they are never
+ * initialized. Additional resources can be acquired by models in
+ * init(), however they must be terminated in the term().
+ */
+ int (*init)(struct kbase_ipa_model *model);
+ /* Called immediately after init(), or when a parameter is changed, so
+ * that any coefficients derived from model parameters can be
+ * recalculated. */
+ int (*recalculate)(struct kbase_ipa_model *model);
+ void (*term)(struct kbase_ipa_model *model);
+ /*
+ * get_dynamic_coeff() - calculate dynamic power coefficient
+ * @model: pointer to model
+ * @coeffp: pointer to return value location
+ * @current_freq: frequency the GPU has been running at for the
+ * previous sampling period.
+ *
+ * Calculate a dynamic power coefficient, with units pW/(Hz V^2), which
+ * is then scaled by the IPA framework according to the current OPP's
+ * frequency and voltage.
+ *
+ * Return: 0 on success, or an error code.
+ */
+ int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp,
+ u32 current_freq);
+ /*
+ * get_static_coeff() - calculate static power coefficient
+ * @model: pointer to model
+ * @coeffp: pointer to return value location
+ *
+ * Calculate a static power coefficient, with units uW/(V^3), which is
+ * scaled by the IPA framework according to the current OPP's voltage.
+ *
+ * Return: 0 on success, or an error code.
+ */
+ int (*get_static_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
+ /* If false, the model's get_dynamic_coeff() method accounts for how
+ * long the GPU was active over the sample period. If true, the
+ * framework will scale the calculated power according to the
+ * utilization stats recorded by devfreq in get_real_power(). */
+ bool do_utilization_scaling_in_framework;
+};
+
+/* Models can be registered only in the platform's platform_init_func call */
+int kbase_ipa_model_ops_register(struct kbase_device *kbdev,
+ struct kbase_ipa_model_ops *new_model_ops);
+struct kbase_ipa_model *kbase_ipa_get_model(struct kbase_device *kbdev,
+ const char *name);
+
+int kbase_ipa_init(struct kbase_device *kbdev);
+void kbase_ipa_term(struct kbase_device *kbdev);
+void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev);
+void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev);
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model);
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+ struct kbase_ipa_model_ops *ops);
+void kbase_ipa_term_model(struct kbase_ipa_model *model);
+
+extern struct kbase_ipa_model_ops kbase_g71_ipa_model_ops;
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_get_real_power() - get the real power consumption of the GPU
+ * @df: dynamic voltage and frequency scaling information for the GPU.
+ * @power: where to store the power consumption, in mW.
+ * @freq: a frequency, in HZ.
+ * @voltage: a voltage, in mV.
+ *
+ * This function is only exposed for use by unit tests. The returned value
+ * incorporates both static and dynamic power consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+ unsigned long freq,
+ unsigned long voltage);
+#endif /* MALI_UNIT_TEST */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+extern struct devfreq_cooling_ops kbase_ipa_power_model_ops;
+#else
+extern struct devfreq_cooling_power kbase_ipa_power_model_ops;
+#endif
+
+#else /* !(defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+static inline void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev)
+{ }
+
+static inline void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev)
+{ }
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif
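The ops interface declared above is all a platform-specific model has to implement; term() and recalculate() are optional because the framework checks them for NULL. Below is a hedged, minimal sketch of such a model with constant coefficients. The model name and both coefficient values are hypothetical, and a real model would read them from devicetree parameters via kbase_ipa_model_add_param_s32() rather than hard-coding them.

/* Minimal sketch only; values and the "example-power-model" name are made up. */
static int example_model_init(struct kbase_ipa_model *model)
{
	return 0;	/* no private state needed for this sketch */
}

static int example_model_static_coeff(struct kbase_ipa_model *model,
				      u32 *coeffp)
{
	*coeffp = 1000000;	/* uW/V^3, illustrative value */
	return 0;
}

static int example_model_dynamic_coeff(struct kbase_ipa_model *model,
				       u32 *coeffp, u32 current_freq)
{
	*coeffp = 5000;		/* pW/(Hz V^2), illustrative value */
	return 0;
}

struct kbase_ipa_model_ops example_ipa_model_ops = {
	.name = "example-power-model",
	.init = example_model_init,
	.get_static_coeff = example_model_static_coeff,
	.get_dynamic_coeff = example_model_dynamic_coeff,
	/* Let the framework scale by utilization, as the simple model does. */
	.do_utilization_scaling_in_framework = true,
};

As the header notes, registration would happen from the platform's platform_init_func via kbase_ipa_model_ops_register(), and the model would then be selected through the "ipa-model" devicetree property handled in kbase_ipa_init().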
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.c b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.c
new file mode 100644
index 000000000000..eafc14009ddc
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.c
@@ -0,0 +1,219 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+#define DEFINE_DEBUGFS_ATTRIBUTE DEFINE_SIMPLE_ATTRIBUTE
+#endif
+
+struct kbase_ipa_model_param {
+ char *name;
+ union {
+ void *voidp;
+ s32 *s32p;
+ char *str;
+ } addr;
+ size_t size;
+ enum kbase_ipa_model_param_type type;
+ struct kbase_ipa_model *model;
+ struct list_head link;
+};
+
+static int param_int_get(void *data, u64 *val)
+{
+ struct kbase_ipa_model_param *param = data;
+
+ mutex_lock(&param->model->kbdev->ipa.lock);
+ *(s64 *) val = *param->addr.s32p;
+ mutex_unlock(&param->model->kbdev->ipa.lock);
+
+ return 0;
+}
+
+static int param_int_set(void *data, u64 val)
+{
+ struct kbase_ipa_model_param *param = data;
+ struct kbase_ipa_model *model = param->model;
+ s64 sval = (s64) val;
+ int err = 0;
+
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+
+ mutex_lock(&param->model->kbdev->ipa.lock);
+ *param->addr.s32p = val;
+ err = kbase_ipa_model_recalculate(model);
+ mutex_unlock(&param->model->kbdev->ipa.lock);
+
+ return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_s32, param_int_get, param_int_set, "%lld\n");
+
+static ssize_t param_string_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct kbase_ipa_model_param *param = file->private_data;
+ ssize_t ret;
+ size_t len;
+
+ mutex_lock(&param->model->kbdev->ipa.lock);
+ len = strnlen(param->addr.str, param->size - 1) + 1;
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ param->addr.str, len);
+ mutex_unlock(&param->model->kbdev->ipa.lock);
+
+ return ret;
+}
+
+static ssize_t param_string_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct kbase_ipa_model_param *param = file->private_data;
+ struct kbase_ipa_model *model = param->model;
+ ssize_t ret = count;
+ size_t buf_size;
+ int err;
+
+ mutex_lock(&model->kbdev->ipa.lock);
+
+ if (count > param->size) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ buf_size = min(param->size - 1, count);
+ if (copy_from_user(param->addr.str, user_buf, buf_size)) {
+ ret = -EFAULT;
+ goto end;
+ }
+
+ param->addr.str[buf_size] = '\0';
+
+ err = kbase_ipa_model_recalculate(model);
+ if (err < 0)
+ ret = err;
+
+end:
+ mutex_unlock(&model->kbdev->ipa.lock);
+
+ return ret;
+}
+
+static const struct file_operations fops_string = {
+ .read = param_string_get,
+ .write = param_string_set,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+ void *addr, size_t size,
+ enum kbase_ipa_model_param_type type)
+{
+ struct kbase_ipa_model_param *param;
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+
+ if (!param)
+ return -ENOMEM;
+
+ /* 'name' is stack-allocated for array elements, so copy it into
+ * heap-allocated storage */
+ param->name = kstrdup(name, GFP_KERNEL);
+ param->addr.voidp = addr;
+ param->size = size;
+ param->type = type;
+ param->model = model;
+
+ list_add(&param->link, &model->params);
+
+ return 0;
+}
+
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{
+ struct kbase_ipa_model_param *param_p, *param_n;
+
+ list_for_each_entry_safe(param_p, param_n, &model->params, link) {
+ list_del(&param_p->link);
+ kfree(param_p->name);
+ kfree(param_p);
+ }
+}
+
+static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model)
+{
+ struct list_head *it;
+ struct dentry *dir;
+
+ lockdep_assert_held(&model->kbdev->ipa.lock);
+
+ dir = debugfs_create_dir(model->ops->name,
+ model->kbdev->mali_debugfs_directory);
+
+ if (!dir) {
+ dev_err(model->kbdev->dev,
+ "Couldn't create mali debugfs %s directory",
+ model->ops->name);
+ return;
+ }
+
+ list_for_each(it, &model->params) {
+ struct kbase_ipa_model_param *param =
+ list_entry(it,
+ struct kbase_ipa_model_param,
+ link);
+ const struct file_operations *fops = NULL;
+
+ switch (param->type) {
+ case PARAM_TYPE_S32:
+ fops = &fops_s32;
+ break;
+ case PARAM_TYPE_STRING:
+ fops = &fops_string;
+ break;
+ }
+
+ if (unlikely(!fops)) {
+ dev_err(model->kbdev->dev,
+ "Type not set for %s parameter %s\n",
+ model->ops->name, param->name);
+ } else {
+ debugfs_create_file(param->name, S_IRUGO | S_IWUSR,
+ dir, param, fops);
+ }
+ }
+}
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev)
+{
+ mutex_lock(&kbdev->ipa.lock);
+
+ if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+ kbase_ipa_model_debugfs_init(kbdev->ipa.configured_model);
+ kbase_ipa_model_debugfs_init(kbdev->ipa.fallback_model);
+
+ mutex_unlock(&kbdev->ipa.lock);
+}
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.h b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.h
new file mode 100644
index 000000000000..ec06e2096f94
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_debugfs.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_IPA_DEBUGFS_H_
+#define _KBASE_IPA_DEBUGFS_H_
+
+enum kbase_ipa_model_param_type {
+ PARAM_TYPE_S32 = 1,
+ PARAM_TYPE_STRING,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev);
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+ void *addr, size_t size,
+ enum kbase_ipa_model_param_type type);
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline int kbase_ipa_model_param_add(struct kbase_ipa_model *model,
+ const char *name, void *addr,
+ size_t size,
+ enum kbase_ipa_model_param_type type)
+{
+ return 0;
+}
+
+static inline void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{ }
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _KBASE_IPA_DEBUGFS_H_ */
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.c b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.c
new file mode 100644
index 000000000000..dd65d33344b4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.c
@@ -0,0 +1,325 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/thermal.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_ipa_simple.h"
+
+#if MALI_UNIT_TEST
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static unsigned long dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+ struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ *temp = ACCESS_ONCE(dummy_temp);
+ return 0;
+}
+
+#else
+static int dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+ struct thermal_zone_device *tz,
+ int *temp)
+{
+ *temp = ACCESS_ONCE(dummy_temp);
+ return 0;
+}
+#endif
+
+/* Intercept calls to the kernel function using a macro */
+#ifdef thermal_zone_get_temp
+#undef thermal_zone_get_temp
+#endif
+#define thermal_zone_get_temp(tz, temp) \
+ kbase_simple_power_model_get_dummy_temp(tz, temp)
+
+void kbase_simple_power_model_set_dummy_temp(int temp)
+{
+ ACCESS_ONCE(dummy_temp) = temp;
+}
+KBASE_EXPORT_TEST_API(kbase_simple_power_model_set_dummy_temp);
+
+#endif /* MALI_UNIT_TEST */
+
+/*
+ * This model is primarily designed for the Juno platform. It may not be
+ * suitable for other platforms. The additional resources in this model
+ * should preferably be minimal, as this model is rarely used when a dynamic
+ * model is available.
+ */
+
+/**
+ * struct kbase_ipa_model_simple_data - IPA context per device
+ * @dynamic_coefficient: dynamic coefficient of the model
+ * @static_coefficient: static coefficient of the model
+ * @ts: Thermal scaling coefficients of the model
+ * @tz_name: Thermal zone name
+ * @gpu_tz: thermal zone device
+ * @poll_temperature_thread: Handle for temperature polling thread
+ * @current_temperature: Most recent value of polled temperature
+ * @temperature_poll_interval_ms: How often temperature should be checked, in ms
+ */
+
+struct kbase_ipa_model_simple_data {
+ u32 dynamic_coefficient;
+ u32 static_coefficient;
+ s32 ts[4];
+ char tz_name[16];
+ struct thermal_zone_device *gpu_tz;
+ struct task_struct *poll_temperature_thread;
+ int current_temperature;
+ int temperature_poll_interval_ms;
+};
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+/**
+ * calculate_temp_scaling_factor() - Calculate temperature scaling coefficient
+ * @ts: Signed coefficients, in order t^0 to t^3, with units Deg^-N
+ * @t: Temperature, in mDeg C. Range: -2^17 < t < 2^17
+ *
+ * Scale the temperature according to a cubic polynomial whose coefficients are
+ * provided in the device tree. The result is used to scale the static power
+ * coefficient, where 1000000 means no change.
+ *
+ * Return: Temperature scaling factor. Range 0 <= ret <= 10,000,000.
+ */
+static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t)
+{
+ /* Range: -2^24 < t2 < 2^24 m(Deg^2) */
+ const s64 t2 = (t * t) / 1000;
+
+ /* Range: -2^31 < t3 < 2^31 m(Deg^3) */
+ const s64 t3 = (t * t2) / 1000;
+
+ /*
+ * Sum the parts. t^[1-3] are in m(Deg^N), but the coefficients are in
+ * Deg^-N, so we need to multiply the last coefficient by 1000.
+ * Range: -2^63 < res_big < 2^63
+ */
+ const s64 res_big = ts[3] * t3 /* +/- 2^62 */
+ + ts[2] * t2 /* +/- 2^55 */
+ + ts[1] * t /* +/- 2^48 */
+ + ts[0] * 1000; /* +/- 2^41 */
+
+ /* Range: -2^60 < res_unclamped < 2^60 */
+ s64 res_unclamped = res_big / 1000;
+
+ /* Clamp to range of 0x to 10x the static power */
+ return clamp(res_unclamped, (s64) 0, (s64) 10000000);
+}
+
+/* We can't call thermal_zone_get_temp() directly in model_static_coeff(),
+ * because we don't know if tz->lock is held in the same thread. So poll it in
+ * a separate thread to get around this. */
+static int poll_temperature(void *data)
+{
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *) data;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+ unsigned long temp;
+#else
+ int temp;
+#endif
+
+ while (!kthread_should_stop()) {
+ struct thermal_zone_device *tz = ACCESS_ONCE(model_data->gpu_tz);
+
+ if (tz) {
+ int ret;
+
+ ret = thermal_zone_get_temp(tz, &temp);
+ if (ret) {
+ pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n",
+ ret);
+ temp = FALLBACK_STATIC_TEMPERATURE;
+ }
+ } else {
+ temp = FALLBACK_STATIC_TEMPERATURE;
+ }
+
+ ACCESS_ONCE(model_data->current_temperature) = temp;
+
+ msleep_interruptible(ACCESS_ONCE(model_data->temperature_poll_interval_ms));
+ }
+
+ return 0;
+}
+
+static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+ u32 temp_scaling_factor;
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *) model->model_data;
+ u64 coeff_big;
+ int temp;
+
+ temp = ACCESS_ONCE(model_data->current_temperature);
+
+ /* Range: 0 <= temp_scaling_factor < 2^24 */
+ temp_scaling_factor = calculate_temp_scaling_factor(model_data->ts,
+ temp);
+
+ /*
+ * Range: 0 <= coeff_big < 2^52 to avoid overflowing *coeffp. This
+ * means static_coefficient must be in range
+ * 0 <= static_coefficient < 2^28.
+ */
+ coeff_big = (u64) model_data->static_coefficient * (u64) temp_scaling_factor;
+ *coeffp = coeff_big / 1000000;
+
+ return 0;
+}
+
+static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
+ u32 current_freq)
+{
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *) model->model_data;
+
+ *coeffp = model_data->dynamic_coefficient;
+
+ return 0;
+}
+
+static int add_params(struct kbase_ipa_model *model)
+{
+ int err = 0;
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *)model->model_data;
+
+ err = kbase_ipa_model_add_param_s32(model, "static-coefficient",
+ &model_data->static_coefficient,
+ 1, true);
+ if (err)
+ goto end;
+
+ err = kbase_ipa_model_add_param_s32(model, "dynamic-coefficient",
+ &model_data->dynamic_coefficient,
+ 1, true);
+ if (err)
+ goto end;
+
+ err = kbase_ipa_model_add_param_s32(model, "ts",
+ model_data->ts, 4, true);
+ if (err)
+ goto end;
+
+ err = kbase_ipa_model_add_param_string(model, "thermal-zone",
+ model_data->tz_name,
+ sizeof(model_data->tz_name), true);
+ if (err)
+ goto end;
+
+ model_data->temperature_poll_interval_ms = 200;
+ err = kbase_ipa_model_add_param_s32(model, "temp-poll-interval-ms",
+ &model_data->temperature_poll_interval_ms,
+ 1, false);
+
+end:
+ return err;
+}
+
+static int kbase_simple_power_model_init(struct kbase_ipa_model *model)
+{
+ int err;
+ struct kbase_ipa_model_simple_data *model_data;
+
+ model_data = kzalloc(sizeof(struct kbase_ipa_model_simple_data),
+ GFP_KERNEL);
+ if (!model_data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ model->model_data = (void *) model_data;
+
+ model_data->current_temperature = FALLBACK_STATIC_TEMPERATURE;
+ model_data->poll_temperature_thread = kthread_run(poll_temperature,
+ (void *) model_data,
+ "mali-simple-power-model-temp-poll");
+ if (IS_ERR(model_data->poll_temperature_thread)) {
+ err = PTR_ERR(model_data->poll_temperature_thread);
+ model_data->poll_temperature_thread = NULL;
+ goto exit;
+ }
+
+ err = add_params(model);
+
+exit:
+ if (err && model_data) {
+ if (model_data->poll_temperature_thread)
+ kthread_stop(model_data->poll_temperature_thread);
+ kfree(model_data);
+ }
+
+ return err;
+}
+
+static int kbase_simple_power_model_recalculate(struct kbase_ipa_model *model)
+{
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *)model->model_data;
+ struct thermal_zone_device *tz;
+
+ if (!strnlen(model_data->tz_name, sizeof(model_data->tz_name))) {
+ tz = NULL;
+ } else {
+ tz = thermal_zone_get_zone_by_name(model_data->tz_name);
+
+ if (IS_ERR_OR_NULL(tz)) {
+ pr_warn_ratelimited("Error %ld getting thermal zone \'%s\', not yet ready?\n",
+ PTR_ERR(tz), model_data->tz_name);
+ tz = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ACCESS_ONCE(model_data->gpu_tz) = tz;
+
+ return 0;
+}
+
+static void kbase_simple_power_model_term(struct kbase_ipa_model *model)
+{
+ struct kbase_ipa_model_simple_data *model_data =
+ (struct kbase_ipa_model_simple_data *)model->model_data;
+
+ kthread_stop(model_data->poll_temperature_thread);
+
+ kfree(model_data);
+}
+
+struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = {
+ .name = "mali-simple-power-model",
+ .init = &kbase_simple_power_model_init,
+ .recalculate = &kbase_simple_power_model_recalculate,
+ .term = &kbase_simple_power_model_term,
+ .get_dynamic_coeff = &model_dynamic_coeff,
+ .get_static_coeff = &model_static_coeff,
+ .do_utilization_scaling_in_framework = true,
+};
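To see the cubic in calculate_temp_scaling_factor() in action, the sketch below reproduces the same integer arithmetic in userspace. The coefficients are invented (they encode roughly 0.9 + 0.002*T and are not taken from any devicetree); at 55 degC (55000 mDeg) the factor evaluates to 1010000, i.e. the static power coefficient is scaled by 1.01x, consistent with the 1000000-means-no-change convention documented above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors calculate_temp_scaling_factor(); coefficients are illustrative. */
static uint32_t temp_scaling_factor(const int32_t ts[4], int64_t t_mdeg)
{
	int64_t t2 = (t_mdeg * t_mdeg) / 1000;    /* m(Deg^2) */
	int64_t t3 = (t_mdeg * t2) / 1000;        /* m(Deg^3) */
	int64_t res = (ts[3] * t3 + ts[2] * t2 + ts[1] * t_mdeg +
		       (int64_t)ts[0] * 1000) / 1000;

	if (res < 0)
		res = 0;
	if (res > 10000000)
		res = 10000000;                   /* clamp to 0x .. 10x */
	return (uint32_t)res;                     /* 1000000 == 1.0x */
}

int main(void)
{
	/* 0.9 + 0.002 * T: at 55 degC (55000 mDeg) -> 1.01x static power */
	const int32_t ts[4] = { 900000, 2000, 0, 0 };

	printf("%u\n", temp_scaling_factor(ts, 55000));  /* prints 1010000 */
	return 0;
}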
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.h b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.h
new file mode 100644
index 000000000000..e78d6173300b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_simple.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_IPA_SIMPLE_H_
+#define _KBASE_IPA_SIMPLE_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+extern struct kbase_ipa_model_ops kbase_simple_ipa_model_ops;
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_simple_power_model_set_dummy_temp() - set a dummy temperature value
+ * @temp: Temperature of the thermal zone, in millidegrees celsius.
+ *
+ * This is only intended for use in unit tests, to ensure that the temperature
+ * values used by the simple power model are predictable. Deterministic
+ * behavior is necessary to allow validation of the static power values
+ * computed by this model.
+ */
+void kbase_simple_power_model_set_dummy_temp(int temp);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif /* _KBASE_IPA_SIMPLE_H_ */
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.c b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.c
new file mode 100644
index 000000000000..d3964d0d3c73
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.c
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase_ipa_vinstr_common.h"
+
+#if MALI_UNIT_TEST
+static ktime_t dummy_time;
+
+/* Intercept calls to the kernel function using a macro */
+#ifdef ktime_get
+#undef ktime_get
+#endif
+#define ktime_get() (ACCESS_ONCE(dummy_time))
+
+void kbase_ipa_set_dummy_time(ktime_t t)
+{
+ ACCESS_ONCE(dummy_time) = t;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time);
+
+#endif /* MALI_UNIT_TEST */
+
+/**
+ * read_hwcnt() - read a counter value
+ * @model_data: pointer to model data
+ * @offset: offset, in bytes, into vinstr buffer
+ *
+ * Return: A 32-bit counter value. Range: 0 < value < 2^27 (worst case would be
+ * incrementing every cycle over a ~100ms sample period at a high frequency,
+ * e.g. 1 GHz: 2^30 * 0.1 seconds ~= 2^27).
+ */
+static inline u32 kbase_ipa_read_hwcnt(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ u32 offset)
+{
+ u8 *p = model_data->vinstr_buffer;
+
+ return *(u32 *)&p[offset];
+}
+
+static inline s64 kbase_ipa_add_saturate(s64 a, s64 b)
+{
+ if (S64_MAX - a < b)
+ return S64_MAX;
+ return a + b;
+}
+
+s64 kbase_ipa_sum_all_shader_cores(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter)
+{
+ struct kbase_device *kbdev = model_data->kbdev;
+ u64 core_mask;
+ u32 base = 0;
+ s64 ret = 0;
+
+ core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+ while (core_mask != 0ull) {
+ if ((core_mask & 1ull) != 0ull) {
+ /* 0 < counter_value < 2^27 */
+ u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+ base + counter);
+
+ /* 0 < ret < 2^27 * max_num_cores = 2^32 */
+ ret = kbase_ipa_add_saturate(ret, counter_value);
+ }
+ base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+ core_mask >>= 1;
+ }
+
+ /* Range: -2^54 < ret < 2^54 */
+ ret *= coeff;
+
+ return ret / 1000000;
+}
+
+s64 kbase_ipa_single_counter(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter)
+{
+ /* Range: 0 < counter_value < 2^27 */
+ const u32 counter_value = kbase_ipa_read_hwcnt(model_data, counter);
+
+ /* Range: -2^49 < ret < 2^49 */
+ const s64 multiplied = (s64) counter_value * (s64) coeff;
+
+ /* Range: -2^29 < return < 2^29 */
+ return multiplied / 1000000;
+}
+
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+ struct kbase_device *kbdev = model_data->kbdev;
+ struct kbase_uk_hwcnt_reader_setup setup;
+ size_t dump_size;
+
+ dump_size = kbase_vinstr_dump_size(kbdev);
+ model_data->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL);
+ if (!model_data->vinstr_buffer) {
+ dev_err(kbdev->dev, "Failed to allocate IPA dump buffer");
+ return -1;
+ }
+
+ setup.jm_bm = ~0u;
+ setup.shader_bm = ~0u;
+ setup.tiler_bm = ~0u;
+ setup.mmu_l2_bm = ~0u;
+ model_data->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(kbdev->vinstr_ctx,
+ &setup, model_data->vinstr_buffer);
+ if (!model_data->vinstr_cli) {
+ dev_err(kbdev->dev, "Failed to register IPA with vinstr core");
+ kfree(model_data->vinstr_buffer);
+ model_data->vinstr_buffer = NULL;
+ return -1;
+ }
+
+ model_data->last_sample_read_time = ktime_get();
+ kbase_vinstr_hwc_clear(model_data->vinstr_cli);
+
+ return 0;
+}
+
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+ if (model_data->vinstr_cli)
+ kbase_vinstr_detach_client(model_data->vinstr_cli);
+ model_data->vinstr_cli = NULL;
+ kfree(model_data->vinstr_buffer);
+ model_data->vinstr_buffer = NULL;
+}
+
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
+ u32 current_freq)
+{
+ struct kbase_ipa_model_vinstr_data *model_data =
+ (struct kbase_ipa_model_vinstr_data *)model->model_data;
+ s64 energy = 0;
+ size_t i;
+ ktime_t now = ktime_get();
+ ktime_t time_since_last_sample =
+ ktime_sub(now, model_data->last_sample_read_time);
+ /* Range: 2^0 < time_since_last_sample_ms < 2^10 (1-1000ms) */
+ s64 time_since_last_sample_ms = ktime_to_ms(time_since_last_sample);
+ u64 coeff = 0;
+ u64 num_cycles;
+ int err = 0;
+
+ err = kbase_vinstr_hwc_dump(model_data->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL);
+ if (err)
+ goto err0;
+
+ model_data->last_sample_read_time = now;
+
+ /* Range of 'energy' is +/- 2^34 * number of IPA groups, so around
+ * -2^38 < energy < 2^38 */
+ for (i = 0; i < model_data->groups_def_num; i++) {
+ const struct kbase_ipa_group *group = &model_data->groups_def[i];
+ s32 coeff, group_energy;
+
+ coeff = model_data->group_values[i];
+ group_energy = group->op(model_data, coeff, group->counter);
+
+ energy = kbase_ipa_add_saturate(energy, group_energy);
+ }
+
+ /* Range: 0 <= coeff < 2^38 */
+ if (energy > 0)
+ coeff = energy;
+
+ /* Scale by the user-specified factor and divide by 1000. The division is
+ * cancelled out here because we want num_cycles in kHz (see below) and do
+ * not want to lose precision. */
+
+ /* Range: 0 < coeff < 2^53 */
+ coeff = coeff * model_data->scaling_factor;
+
+ if (time_since_last_sample_ms == 0) {
+ time_since_last_sample_ms = 1;
+ } else if (time_since_last_sample_ms < 0) {
+ err = -ERANGE;
+ goto err0;
+ }
+
+ /* Range: 2^20 < num_cycles < 2^40 mCycles */
+ num_cycles = (u64) current_freq * (u64) time_since_last_sample_ms;
+ /* Range: 2^10 < num_cycles < 2^30 Cycles */
+ num_cycles /= 1000000;
+
+ /* num_cycles should never be 0 in _normal_ usage (because we expect
+ * frequencies on the order of MHz and >10ms polling intervals), but
+ * protect against divide-by-zero anyway. */
+ if (num_cycles == 0)
+ num_cycles = 1;
+
+ /* Range: 0 < coeff < 2^43 */
+ coeff = div_u64(coeff, num_cycles);
+
+err0:
+ /* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */
+ *coeffp = clamp(coeff, (u64) 0, (u64) 1 << 16);
+ return err;
+}
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.h b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.h
new file mode 100644
index 000000000000..25b36c8e3089
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_common.h
@@ -0,0 +1,161 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_IPA_VINSTR_COMMON_H_
+#define _KBASE_IPA_VINSTR_COMMON_H_
+
+#include "mali_kbase.h"
+
+/* Maximum length for the name of an IPA group. */
+#define KBASE_IPA_MAX_GROUP_NAME_LEN 15
+
+/* Maximum number of IPA groups for an IPA model. */
+#define KBASE_IPA_MAX_GROUP_DEF_NUM 16
+
+/* Number of bytes per hardware counter in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_CNT 4
+
+/* Number of hardware counters per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_CNT_PER_BLOCK 64
+
+/* Number of bytes per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_BLOCK \
+ (KBASE_IPA_NR_CNT_PER_BLOCK * KBASE_IPA_NR_BYTES_PER_CNT)
+
+
+
+/**
+ * struct kbase_ipa_model_vinstr_data - IPA context per device
+ * @kbdev: pointer to kbase device
+ * @group_values: coefficient value for each IPA group, initialized from the
+ * group defaults and exposed as model parameters
+ * @groups_def: Array of IPA groups.
+ * @groups_def_num: Number of elements in the array of IPA groups.
+ * @vinstr_cli: vinstr client handle
+ * @vinstr_buffer: buffer to dump hardware counters onto
+ * @last_sample_read_time: timestamp of last vinstr buffer read
+ * @scaling_factor: user-specified power scaling factor. This is
+ * interpreted as a fraction where the denominator is
+ * 1000. Range approx 0.0-32.0:
+ * 0 < scaling_factor < 2^15
+ */
+struct kbase_ipa_model_vinstr_data {
+ struct kbase_device *kbdev;
+ s32 group_values[KBASE_IPA_MAX_GROUP_DEF_NUM];
+ const struct kbase_ipa_group *groups_def;
+ size_t groups_def_num;
+ struct kbase_vinstr_client *vinstr_cli;
+ void *vinstr_buffer;
+ ktime_t last_sample_read_time;
+ s32 scaling_factor;
+};
+
+/**
+ * struct kbase_ipa_group - represents a single IPA group
+ * @name: name of the IPA group
+ * @default_value: default value of coefficient for IPA group.
+ * Coefficients are interpreted as fractions where the
+ * denominator is 1000000.
+ * @op: which operation to be performed on the counter values
+ * @counter: counter used to calculate energy for IPA group
+ */
+struct kbase_ipa_group {
+ char name[KBASE_IPA_MAX_GROUP_NAME_LEN + 1];
+ s32 default_value;
+ s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32);
+ u32 counter;
+};
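+
+/*
+ * Example of a group definition (illustrative; this is the form used by the
+ * per-GPU model sources such as mali_kbase_ipa_vinstr_g71.c):
+ *
+ *	{
+ *		.name = "gpu_active",
+ *		.default_value = 115800,
+ *		.op = kbase_ipa_single_counter,
+ *		.counter = GPU_ACTIVE,
+ *	},
+ */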
+
+/**
+ * kbase_ipa_sum_all_shader_cores() - sum a counter over all shader cores
+ * @model_data: pointer to model data
+ * @coeff: model coefficient. Unity is ~2^20, so range approx
+ * +/- 4.0: -2^22 < coeff < 2^22
+ * @counter: offset, in bytes, of the counter within each shader core's block
+ *
+ * Calculate energy estimation based on hardware counter `counter'
+ * across all shader cores.
+ *
+ * Return: Sum of counter values. Range: -2^34 < ret < 2^34
+ */
+s64 kbase_ipa_sum_all_shader_cores(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_single_counter() - scale a single hardware counter
+ * @model_data: pointer to model data
+ * @coeff: model coefficient. Unity is ~2^20, so range approx
+ * +/- 4.0: -2^22 < coeff < 2^22
+ * @counter: offset, in bytes, of the counter within the vinstr buffer
+ *
+ * Calculate energy estimation based on hardware counter `counter'.
+ *
+ * Return: Counter value scaled by the coefficient. Range: -2^34 < ret < 2^34
+ */
+s64 kbase_ipa_single_counter(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_attach_vinstr() - attach a vinstr_buffer to an IPA model.
+ * @model_data: pointer to model data
+ *
+ * Attach a vinstr_buffer to an IPA model. The vinstr_buffer
+ * allows access to the hardware counters used to calculate
+ * energy consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+ */
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_detach_vinstr() - detach a vinstr_buffer from an IPA model.
+ * @model_data: pointer to model data
+ *
+ * Detach a vinstr_buffer from an IPA model.
+ */
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_vinstr_dynamic_coeff() - calculate dynamic power based on HW counters
+ * @model: pointer to instantiated model
+ * @coeffp: pointer to location where calculated power, in
+ * pW/(Hz V^2), is stored.
+ * @current_freq: frequency the GPU has been running at over the sample
+ * period, in Hz. Range: 10 MHz to 1 GHz,
+ * 2^20 < current_freq < 2^30
+ *
+ * This is a GPU-agnostic implementation of the get_dynamic_coeff()
+ * function of an IPA model. It relies on the model being populated
+ * with GPU-specific attributes at initialization time.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
+ u32 current_freq);
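+
+/*
+ * Illustrative example (not part of the interface): a returned coefficient of
+ * 2^16 pW/(Hz V^2) corresponds to roughly
+ *
+ *	65536e-12 * 400e6 * 0.75^2 ~= 14.7 W
+ *
+ * of dynamic power at 400 MHz and 0.75 V, which matches the upper clamp
+ * applied by the implementation.
+ */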
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_ipa_set_dummy_time() - set a dummy monotonic time value
+ * @t: a monotonic time value
+ *
+ * This is only intended for use in unit tests, to ensure that the kernel time
+ * values used by a power model are predictable. Deterministic behavior is
+ * necessary to allow validation of the dynamic power values computed by the
+ * model.
+ */
+void kbase_ipa_set_dummy_time(ktime_t t);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* _KBASE_IPA_VINSTR_COMMON_H_ */
diff --git a/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_g71.c b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_g71.c
new file mode 100644
index 000000000000..14241a0d68b5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/ipa/mali_kbase_ipa_vinstr_g71.c
@@ -0,0 +1,128 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <linux/thermal.h>
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase.h"
+
+
+#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MMU_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define SC0_BASE (3 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+#define GPU_ACTIVE (JM_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 6)
+#define TILER_ACTIVE (TILER_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 45)
+#define L2_ANY_LOOKUP (MMU_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 25)
+#define FRAG_ACTIVE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 4)
+#define EXEC_CORE_ACTIVE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define EXEC_INSTR_COUNT (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define TEX_COORD_ISSUE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define VARY_SLOT_32 (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define VARY_SLOT_16 (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define BEATS_RD_LSC (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define BEATS_WR_LSC (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define BEATS_WR_TIB (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 62)
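+
+/*
+ * Each counter block in the vinstr dump buffer holds 64 counters of 4 bytes
+ * (KBASE_IPA_NR_BYTES_PER_BLOCK = 256 bytes), so, for example,
+ * EXEC_INSTR_COUNT resolves to 3 * 256 + 4 * 28 = 880 bytes into the buffer.
+ */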
+
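+/*
+ * Default coefficients for the Mali-G71 IPA groups. As described in
+ * mali_kbase_ipa_vinstr_common.h, the values are fractions with a denominator
+ * of 1000000, so e.g. 526300 represents ~0.53; a negative value (tile_wb)
+ * subtracts from the energy estimate.
+ */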
+static const struct kbase_ipa_group ipa_groups_def[] = {
+ {
+ .name = "l2_access",
+ .default_value = 526300,
+ .op = kbase_ipa_single_counter,
+ .counter = L2_ANY_LOOKUP,
+ },
+ {
+ .name = "exec_instr_count",
+ .default_value = 301100,
+ .op = kbase_ipa_sum_all_shader_cores,
+ .counter = EXEC_INSTR_COUNT,
+ },
+ {
+ .name = "tex_issue",
+ .default_value = 197400,
+ .op = kbase_ipa_sum_all_shader_cores,
+ .counter = TEX_COORD_ISSUE,
+ },
+ {
+ .name = "tile_wb",
+ .default_value = -156400,
+ .op = kbase_ipa_sum_all_shader_cores,
+ .counter = BEATS_WR_TIB,
+ },
+ {
+ .name = "gpu_active",
+ .default_value = 115800,
+ .op = kbase_ipa_single_counter,
+ .counter = GPU_ACTIVE,
+ },
+};
+
+static int kbase_g71_power_model_init(struct kbase_ipa_model *model)
+{
+ int i, err = 0;
+ struct kbase_ipa_model_vinstr_data *model_data;
+
+ model_data = kzalloc(sizeof(*model_data), GFP_KERNEL);
+ if (!model_data)
+ return -ENOMEM;
+
+ model_data->kbdev = model->kbdev;
+ model_data->groups_def = ipa_groups_def;
+ BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def) > KBASE_IPA_MAX_GROUP_DEF_NUM);
+ model_data->groups_def_num = ARRAY_SIZE(ipa_groups_def);
+
+ model->model_data = (void *) model_data;
+
+ for (i = 0; i < ARRAY_SIZE(ipa_groups_def); ++i) {
+ const struct kbase_ipa_group *group = &ipa_groups_def[i];
+
+ model_data->group_values[i] = group->default_value;
+ err = kbase_ipa_model_add_param_s32(model, group->name,
+ &model_data->group_values[i],
+ 1, false);
+ if (err)
+ break;
+ }
+
+ model_data->scaling_factor = 15000;
+ err = kbase_ipa_model_add_param_s32(model, "scale",
+ &model_data->scaling_factor,
+ 1, false);
+
+ err = kbase_ipa_attach_vinstr(model_data);
+
+ return err;
+}
+
+static void kbase_g71_power_model_term(struct kbase_ipa_model *model)
+{
+ struct kbase_ipa_model_vinstr_data *model_data =
+ (struct kbase_ipa_model_vinstr_data *)model->model_data;
+
+ kbase_ipa_detach_vinstr(model_data);
+ kfree(model_data);
+}
+
+
+struct kbase_ipa_model_ops kbase_g71_ipa_model_ops = {
+ .name = "mali-g71-power-model",
+ .init = kbase_g71_power_model_init,
+ .term = kbase_g71_power_model_term,
+ .get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff,
+ .do_utilization_scaling_in_framework = false,
+};
+KBASE_EXPORT_TEST_API(kbase_g71_ipa_model_ops);
diff --git a/drivers/gpu/arm_gpu/mali_base_hwconfig_features.h b/drivers/gpu/arm_gpu/mali_base_hwconfig_features.h
new file mode 100644
index 000000000000..bead0ab167af
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_base_hwconfig_features.h
@@ -0,0 +1,368 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_FEATURES_H_
+#define _BASE_HWCONFIG_FEATURES_H_
+
+enum base_hw_feature {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_generic[] = {
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t60x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t62x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t72x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t76x[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tFxx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t83x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t82x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tMIx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tHEx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tSIx[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tDVx[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+
+#ifdef MALI_INCLUDE_TGOX
+static const enum base_hw_feature base_hw_features_tGOx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_END
+};
+
+#endif /* MALI_INCLUDE_TGOX */
+
+#ifdef MALI_INCLUDE_TKAX
+static const enum base_hw_feature base_hw_features_tKAx[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+#endif /* MALI_INCLUDE_TKAX */
+
+#ifdef MALI_INCLUDE_TTRX
+static const enum base_hw_feature base_hw_features_tTRx[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+#endif /* MALI_INCLUDE_TTRX */
+
+#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_base_hwconfig_issues.h b/drivers/gpu/arm_gpu/mali_base_hwconfig_issues.h
new file mode 100644
index 000000000000..d068aaf2cda8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_base_hwconfig_issues.h
@@ -0,0 +1,1134 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_ISSUES_H_
+#define _BASE_HWCONFIG_ISSUES_H_
+
+enum base_hw_issue {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8879,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_generic[] = {
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t72x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t76x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t60x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t62x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tFRx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t86x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t83x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T720_1386,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t82x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8456,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tSIx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+
+
+#ifdef MALI_INCLUDE_TGOX
+static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TGOX */
+
+#ifdef MALI_INCLUDE_TGOX
+static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TGOX */
+
+#ifdef MALI_INCLUDE_TKAX
+static const enum base_hw_issue base_hw_issues_tKAx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TKAX */
+
+#ifdef MALI_INCLUDE_TKAX
+static const enum base_hw_issue base_hw_issues_model_tKAx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TKAX */
+
+#ifdef MALI_INCLUDE_TTRX
+static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TTRX */
+
+#ifdef MALI_INCLUDE_TTRX
+static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* MALI_INCLUDE_TTRX */
+
+#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_base_kernel.h b/drivers/gpu/arm_gpu/mali_base_kernel.h
new file mode 100644
index 000000000000..998d097509e8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_base_kernel.h
@@ -0,0 +1,1818 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base structures shared with the kernel.
+ */
+
+#ifndef _BASE_KERNEL_H_
+#define _BASE_KERNEL_H_
+
+/* Support UK10_2 IOCTLS */
+#define BASE_LEGACY_UK10_2_SUPPORT 1
+
+/* Support UK10_4 IOCTLS */
+#define BASE_LEGACY_UK10_4_SUPPORT 1
+
+typedef struct base_mem_handle {
+ struct {
+ u64 handle;
+ } basep;
+} base_mem_handle;
+
+#include "mali_base_mem_priv.h"
+#include "mali_kbase_profiling_gator_api.h"
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT 256
+
+#define BASEP_JD_SEM_PER_WORD_LOG2 5
+#define BASEP_JD_SEM_PER_WORD (1 << BASEP_JD_SEM_PER_WORD_LOG2)
+#define BASEP_JD_SEM_WORD_NR(x) ((x) >> BASEP_JD_SEM_PER_WORD_LOG2)
+#define BASEP_JD_SEM_MASK_IN_WORD(x) (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
+#define BASEP_JD_SEM_ARRAY_SIZE BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)
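+
+/*
+ * For example, with BASE_JD_ATOM_COUNT == 256 and 32 semaphores per word,
+ * BASEP_JD_SEM_ARRAY_SIZE works out to 256 >> 5 == 8 words.
+ */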
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+#if defined CDBG_ASSERT
+#define LOCAL_ASSERT CDBG_ASSERT
+#elif defined KBASE_DEBUG_ASSERT
+#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
+#else
+#error assert macro not defined!
+#endif
+
+#if defined PAGE_MASK
+#define LOCAL_PAGE_LSB ~PAGE_MASK
+#else
+#include <osu/mali_osu.h>
+
+#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
+#else
+#error Failed to find page size
+#endif
+#endif
+
+/**
+ * @addtogroup base_user_api User-side Base APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_memory User-side Base Memory APIs
+ * @{
+ */
+
+/**
+ * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
+ *
+ * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
+ * in order to determine the best cache policy. Some combinations are of
+ * course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD, which would define
+ * a region that is write-only on the CPU side yet heavily read by the CPU).
+ * Other flags are only meaningful to a particular allocator.
+ * More flags can be added to this list, as long as they don't clash
+ * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ */
+typedef u32 base_mem_alloc_flags;
+
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+ /* BASE_MEM_HINT flags have been removed, but their values are reserved
+ * for backwards compatibility with older user-space drivers. The values
+ * can be re-used once support for r5p0 user-space drivers is removed,
+ * presumably in r7p0.
+ *
+ * RESERVED: (1U << 5)
+ * RESERVED: (1U << 6)
+ * RESERVED: (1U << 7)
+ * RESERVED: (1U << 8)
+ */
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* Should be cached on the CPU
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the alloc
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Secure memory
+ */
+#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 19
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask for all the flags which are modifiable via the base_mem_set_flags
+ * interface.
+ */
+#define BASE_MEM_FLAGS_MODIFIABLE \
+ (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
+ BASE_MEM_COHERENT_LOCAL)
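+
+/* Illustrative sketch only (not part of the Base ABI): one plausible flag
+ * combination for a buffer that the CPU writes and the GPU reads, built from
+ * the definitions above. The policy an allocator actually chooses may differ.
+ */
+static inline base_mem_alloc_flags example_cpu_write_gpu_read_flags(void)
+{
+	/* CPU-cached, CPU-writable, GPU-readable, growable on GPU page fault */
+	return BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_RD |
+	       BASE_MEM_CACHED_CPU | BASE_MEM_GROW_ON_GPF;
+}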
+
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
+ *
+ * Each type defines what the supported handle type is.
+ *
+ * If any new type is added here ARM must be contacted
+ * to allocate a numeric value for it.
+ * Do not just add a new type without synchronizing with ARM
+ * as future releases from ARM might include other new types
+ * which could clash with your custom types.
+ */
+typedef enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ BASE_MEM_IMPORT_TYPE_UMP = 1,
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+} base_mem_import_type;
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: address of imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ u64 ptr;
+ u64 length;
+};
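+
+/* Illustrative sketch only: populating the user-buffer import handle above.
+ * The import call that consumes it is not shown here; it is assumed to take
+ * this struct together with BASE_MEM_IMPORT_TYPE_USER_BUFFER.
+ */
+static inline void example_fill_user_buffer_handle(
+		struct base_mem_import_user_buffer *handle,
+		u64 user_ptr, u64 nr_bytes)
+{
+	/* ptr/length are plain 64-bit values so the layout is identical for
+	 * 32-bit and 64-bit user-space. */
+	handle->ptr = user_ptr;
+	handle->length = nr_bytes;
+}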
+
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
+
+/**
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* handles up to 64<<PAGE_SHIFT are reserved for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB 0xfffff000UL
+
+
+/* Bit mask of cookies used for memory allocation setup */
+#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
+
+
+/**
+ * @brief Result codes of changing the size of the backing store allocated to a tmem region
+ */
+typedef enum base_backing_threshold_status {
+ BASE_BACKING_THRESHOLD_OK = 0, /**< Resize successful */
+ BASE_BACKING_THRESHOLD_ERROR_OOM = -2, /**< Increase failed due to an out-of-memory condition */
+ BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */
+} base_backing_threshold_status;
+
+/**
+ * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
+ * @{
+ */
+
+/**
+ * @brief A basic memory operation (sync-set).
+ *
+ * The content of this structure is private, and should only be used
+ * by the accessors.
+ */
+typedef struct base_syncset {
+ struct basep_syncset basep_sset;
+} base_syncset;
+
+/** @} end group base_user_api_memory_defered */
+
+/**
+ * Handle to represent imported memory object.
+ * Simple opaque handle to imported memory; it can't be used
+ * with anything but base_external_resource_init to bind to an atom.
+ */
+typedef struct base_import_handle {
+ struct {
+ u64 handle;
+ } basep;
+} base_import_handle;
+
+/** @} end group base_user_api_memory */
+
+/**
+ * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
+ * @{
+ */
+
+typedef int platform_fence_type;
+#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)
+
+/**
+ * Base stream handle.
+ *
+ * References an underlying base stream object.
+ */
+typedef struct base_stream {
+ struct {
+ int fd;
+ } basep;
+} base_stream;
+
+/**
+ * Base fence handle.
+ *
+ * References an underlying base fence object.
+ */
+typedef struct base_fence {
+ struct {
+ int fd;
+ int stream_fd;
+ } basep;
+} base_fence;
+
+/**
+ * @brief Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as a callback
+ * function pointer or data needed to handle job completion. It is guaranteed
+ * to be untouched by the Base driver.
+ */
+typedef struct base_jd_udata {
+ u64 blob[2]; /**< per-job data array */
+} base_jd_udata;
+
+/**
+ * @brief Memory aliasing info
+ *
+ * Describes a memory handle to be aliased.
+ * A subset of the handle can be chosen for aliasing, given an offset and a
+ * length.
+ * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
+ * region where a special page is mapped with a write-alloc cache setup,
+ * typically used when the write result of the GPU isn't needed, but the GPU
+ * must write anyway.
+ *
+ * Offset and length are specified in pages.
+ * Offset must be within the size of the handle.
+ * Offset+length must not overrun the size of the handle.
+ *
+ * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * @offset Offset within the handle to start aliasing from, in pages.
+ * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
+ * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * specifies the number of times the special page is needed.
+ */
+struct base_mem_aliasing_info {
+ base_mem_handle handle;
+ u64 offset;
+ u64 length;
+};
+
+/**
+ * struct base_jit_alloc_info - Structure which describes a JIT allocation
+ * request.
+ * @gpu_alloc_addr: The GPU virtual address to write the JIT
+ * allocated GPU virtual address to.
+ * @va_pages: The minimum number of virtual pages required.
+ * @commit_pages: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ * Zero is not a valid value.
+ */
+struct base_jit_alloc_info {
+ u64 gpu_alloc_addr;
+ u64 va_pages;
+ u64 commit_pages;
+ u64 extent;
+ u8 id;
+};
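+
+/* Illustrative sketch only: filling in a JIT allocation request as described
+ * above. The chosen values (16 MiB of VA, 1 MiB committed, 256-page growth
+ * granularity, id 1) are arbitrary examples, assuming 4 KiB pages.
+ */
+static inline void example_fill_jit_alloc(struct base_jit_alloc_info *info,
+		u64 result_gpu_va)
+{
+	info->gpu_alloc_addr = result_gpu_va;	/* where the allocated GPU VA is written */
+	info->va_pages = 4096;		/* 16 MiB of virtual range */
+	info->commit_pages = 256;	/* 1 MiB physically backed up front */
+	info->extent = 256;		/* grow in 1 MiB steps on fault */
+	info->id = 1;			/* non-zero; must not be reused until freed */
+}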
+
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field is inserted into the atom structure to specify whether a dependency is a data or
+ * ordering dependency (placing it before/after 'core_req' in the structure makes it possible to add it
+ * without changing the structure size).
+ * When the flag marks a particular dependency as an ordering-only dependency,
+ * errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
+/**
+ * @brief Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. Not specifying the
+ * correct settings can/will cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * A special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies and does not execute anything on the hardware.
+ */
+typedef u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/**
+ * No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/**
+ * Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/**
+ * Requires compute shaders
+ * This covers any of the following Midgard Job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) /**< Requires tiling */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) /**< Requires cache flushes */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) /**< Requires value writeback */
+
+/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/**
+ * SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/**
+ * SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/**
+ * SW Only requirement: External resources are referenced by this atom.
+ * When external resources are referenced, no syncsets can be bundled with the atom;
+ * they should instead be part of NULL jobs inserted into the dependency tree.
+ * The first pre_dep object must be configured for the external resources to use;
+ * the second pre_dep object can be used to create other dependencies.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/**
+ * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
+ * to the hardware but will cause some action to happen within the driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/**
+ * SW Only requirement : Replay job.
+ *
+ * If the preceding job fails, the replay job will cause the jobs specified in
+ * the list of base_jd_replay_payload pointed to by the jc pointer to be
+ * replayed.
+ *
+ * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT
+ * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay
+ * job is failed, as well as any following dependencies.
+ *
+ * The replayed jobs will require a number of atom IDs. If there are not enough
+ * free atom IDs then the replay job will fail.
+ *
+ * If the preceding job does not fail, then the replay job is returned as
+ * completed.
+ *
+ * The replayed jobs will never be returned to userspace. The preceding failed
+ * job will be returned to userspace as failed; the status of this job should
+ * be ignored. Completion should be determined by the status of the replay soft
+ * job.
+ *
+ * In order for the jobs to be replayed, the job headers will have to be
+ * modified. The Status field will be reset to NOT_STARTED. If the Job Type
+ * field indicates a Vertex Shader Job then it will be changed to Null Job.
+ *
+ * The replayed jobs have the following assumptions:
+ *
+ * - No external resources. Any required external resources will be held by the
+ * replay atom.
+ * - Pre-dependencies are created based on job order.
+ * - Atom numbers are automatically assigned.
+ * - device_nr is set to 0. This is not relevant as
+ * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ */
+#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4)
+/**
+ * SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/**
+ * SW only requirement: Just In Time allocation
+ *
+ * This job requests a JIT allocation based on the request in the
+ * @base_jit_alloc_info structure which is passed via the jc element of
+ * the atom.
+ *
+ * It should be noted that the id entry in @base_jit_alloc_info must not
+ * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail, it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job is still submitted to free the JIT allocation.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+/**
+ * SW only requirement: Just In Time free
+ *
+ * This job requests a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC
+ * to be freed. The ID of the JIT allocation is passed via the jc element of
+ * the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/**
+ * SW only requirement: Map external resource
+ *
+ * This job requests that external resource(s) be mapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+/**
+ * SW only requirement: Unmap external resource
+ *
+ * This job requests that external resource(s) be unmapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/**
+ * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
+ *
+ * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+
+/**
+ * HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
+ *
+ * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned off, then
+ * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/**
+ * SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+
+/**
+ * SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
+ * the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
+ * the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/**
+ * These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | \
+ BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+ BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/*
+ * Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+ ((core_req & BASE_JD_REQ_SOFT_JOB) || \
+ (core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
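+
+/* Illustrative sketch only: the macro above is typically used to skip
+ * hardware-specific handling for atoms that never reach the GPU.
+ */
+static inline int example_atom_needs_hw_slot(base_jd_core_req core_req)
+{
+	/* Soft jobs and dependency-only atoms do not occupy a job slot. */
+	return !BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req);
+}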
+
+/**
+ * @brief States to model the state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ * finish. These are run preferentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+enum kbase_atom_coreref_state {
+ /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+ KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+ /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+ /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+ /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+ /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+ KBASE_ATOM_COREREF_STATE_READY
+};
+
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling between atoms of the same type within
+ * a base context, and only after the atoms have had dependencies resolved.
+ * Fragment atoms do not affect non-fragment atoms with lower priorities, and
+ * vice versa. For example, a low priority atom that has had its
+ * dependencies resolved might run before a higher priority atom that has not
+ * had its dependencies resolved.
+ *
+ * The scheduling between base contexts/processes and between atoms from
+ * different base contexts/processes is unaffected by atom priority.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot with their dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+ /** Atom is not used */
+ KBASE_JD_ATOM_STATE_UNUSED,
+ /** Atom is queued in JD */
+ KBASE_JD_ATOM_STATE_QUEUED,
+ /** Atom has been given to JS (is runnable/running) */
+ KBASE_JD_ATOM_STATE_IN_JS,
+ /** Atom has been completed, but not yet handed back to job dispatcher
+ * for dependency resolution */
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ /** Atom has been completed, but not yet handed back to userspace */
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+
+struct base_dependency {
+ base_atom_id atom_id; /**< An atom number */
+ base_jd_dep_type dependency_type; /**< Dependency type */
+};
+
+/* This structure has changed since UK 10.2, for which base_jd_core_req was a u16 value.
+ * In order to keep the size of the structure the same, the padding field has been adjusted
+ * accordingly and a core_req field of u32 type (which UK 10.3 defines base_jd_core_req to be)
+ * is added at the end of the structure. The place in the structure previously occupied by the
+ * u16 core_req is kept but renamed to compat_core_req, and as such it can be used in the ioctl
+ * call for job submission as long as UK 10.2 legacy support remains. Once this support ends,
+ * the field can be left for possible future use. */
+typedef struct base_jd_atom_v2 {
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
+ u64 extres_list; /**< list of external resources */
+ u16 nr_extres; /**< nr of external resources */
+ u16 compat_core_req; /**< core requirements which correspond to the legacy support for UK 10.2 */
+	struct base_dependency pre_dep[2]; /**< pre-dependencies; use the SETTER function to assign this field,
+	in order to reduce the possibility of improper assignment of a dependency field */
+ base_atom_id atom_number; /**< unique number to identify the atom */
+ base_jd_prio prio; /**< Atom priority. Refer to @ref base_jd_prio for more details */
+ u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+ u8 padding[1];
+ base_jd_core_req core_req; /**< core requirements */
+} base_jd_atom_v2;
+
+typedef enum base_external_resource_access {
+ BASE_EXT_RES_ACCESS_SHARED,
+ BASE_EXT_RES_ACCESS_EXCLUSIVE
+} base_external_resource_access;
+
+typedef struct base_external_resource {
+ u64 ext_resource;
+} base_external_resource;
+
+
+/**
+ * The maximum number of external resources which can be mapped/unmapped
+ * in a single request.
+ */
+#define BASE_EXT_RES_COUNT_MAX 10
+
+/**
+ * struct base_external_resource_list - Structure which describes a list of
+ * external resources.
+ * @count: The number of resources.
+ * @ext_res: Array of external resources which is
+ * sized at allocation time.
+ */
+struct base_external_resource_list {
+ u64 count;
+ struct base_external_resource ext_res[1];
+};
+
+struct base_jd_debug_copy_buffer {
+ u64 address;
+ u64 size;
+ struct base_external_resource extres;
+};
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] dep The kbase jd atom dependency to be initialized.
+ * @param id The atom_id to be assigned.
+ * @param dep_type The dep_type to be assigned.
+ *
+ */
+static inline void base_jd_atom_dep_set(struct base_dependency *dep,
+ base_atom_id id, base_jd_dep_type dep_type)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ /*
+	 * Make sure we don't set disallowed combinations
+ * of atom_id/dependency_type.
+ */
+ LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+ (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+ dep->atom_id = id;
+ dep->dependency_type = dep_type;
+}
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] dep The kbase jd atom dependency to be written.
+ * @param[in] from The dependency to make a copy from.
+ *
+ */
+static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
+ const struct base_dependency *from)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
+}
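+
+/* Illustrative sketch only: configuring the two pre-dependency slots of an
+ * atom with the setter above, as the note on base_jd_atom_v2 requires.
+ * Both atom ids are assumed to be valid, non-zero atom numbers.
+ */
+static inline void example_set_atom_deps(struct base_jd_atom_v2 *atom,
+		base_atom_id data_dep, base_atom_id order_dep)
+{
+	/* First slot: a data dependency; second slot: ordering only. */
+	base_jd_atom_dep_set(&atom->pre_dep[0], data_dep, BASE_JD_DEP_TYPE_DATA);
+	base_jd_atom_dep_set(&atom->pre_dep[1], order_dep, BASE_JD_DEP_TYPE_ORDER);
+}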
+
+/**
+ * @brief Soft-atom fence trigger setup.
+ *
+ * Sets up an atom to be a SW-only atom signaling a fence
+ * when it reaches the run state.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to trigger when a GPU job has finished.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init.
+ * Calling this function with an uninitialized fence results in undefined behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
+ * @param[in] fence The base fence object to trigger.
+ */
+static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+ LOCAL_ASSERT(atom);
+ LOCAL_ASSERT(fence);
+ LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
+ LOCAL_ASSERT(fence->basep.stream_fd >= 0);
+ atom->jc = (uintptr_t) fence;
+ atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
+}
+
+/**
+ * @brief Soft-atom fence wait setup.
+ *
+ * Sets up an atom to be a SW-only atom waiting on a fence.
+ * When the fence becomes triggered the atom becomes runnable
+ * and completes immediately.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to block a GPU job until it has been triggered.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init or @a base_fence_import.
+ * Calling this function with an uninitialized fence results in undefined behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
+ * @param[in] fence The base fence object to wait on
+ */
+static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+ LOCAL_ASSERT(atom);
+ LOCAL_ASSERT(fence);
+ LOCAL_ASSERT(fence->basep.fd >= 0);
+ atom->jc = (uintptr_t) fence;
+ atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
+}
+
+/**
+ * @brief External resource info initialization.
+ *
+ * Sets up an external resource object to reference
+ * a memory allocation and the type of access requested.
+ *
+ * @param[in] res The resource object to initialize
+ * @param handle The handle to the imported memory object, must be
+ * obtained by calling @ref base_mem_as_import_handle().
+ * @param access The type of access requested
+ */
+static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
+{
+ u64 address;
+
+ address = handle.basep.handle;
+
+ LOCAL_ASSERT(res != NULL);
+ LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
+ LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+ res->ext_resource = address | (access & LOCAL_PAGE_LSB);
+}
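+
+/* Illustrative sketch only: binding one imported allocation to an atom's
+ * external resource list with the helper above. 'res' must stay valid until
+ * the atom has been submitted; BASE_JD_REQ_EXTERNAL_RESOURCES is also set.
+ */
+static inline void example_bind_import(struct base_jd_atom_v2 *atom,
+		struct base_external_resource *res,
+		struct base_import_handle handle)
+{
+	base_external_resource_init(res, handle, BASE_EXT_RES_ACCESS_SHARED);
+	atom->extres_list = (uintptr_t)res;
+	atom->nr_extres = 1;
+	atom->core_req |= BASE_JD_REQ_EXTERNAL_RESOURCES;
+}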
+
+/**
+ * @brief Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+ BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */
+ BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */
+	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
+ BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */
+ BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */
+ BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */
+ BASE_JD_SW_EVENT_RESERVED = (3u << 11), /**< Reserved event type */
+ BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) /**< Mask to extract the type from an event code */
+};
+
+/**
+ * @brief Job chain event codes
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see ::BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a ::base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * @li 10:0 - subtype
+ * @li 12:11 - type
+ * @li 13 - SW success (only valid if the SW bit is set)
+ * @li 14 - SW event (HW event if not set)
+ * @li 15 - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * - BASE_JD_EVENT_RANGE_\<description\>_START
+ * - BASE_JD_EVENT_RANGE_\<description\>_END
+ *
+ * \a code is in \<description\>'s range when:
+ * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+typedef enum base_jd_event_code {
+ /* HW defined exceptions */
+
+ /** Start of HW Non-fault status codes
+ *
+ * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+ * because the job was hard-stopped
+ */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+ /* non-fatal exceptions */
+ BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
+ BASE_JD_EVENT_DONE = 0x01,
+ BASE_JD_EVENT_STOPPED = 0x03, /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
+ BASE_JD_EVENT_TERMINATED = 0x04, /**< This is actually a fault status code - the job was hard stopped */
+ BASE_JD_EVENT_ACTIVE = 0x08, /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */
+
+ /** End of HW Non-fault status codes
+ *
+ * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+ * because the job was hard-stopped
+ */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+
+ /** Start of HW fault and SW Error status codes */
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+ /* job exceptions */
+ BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+ BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+ BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+ BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+ BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+ BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+ BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+ BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+ BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+ BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+ BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+ BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+ BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+ BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+ BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+ BASE_JD_EVENT_STATE_FAULT = 0x5A,
+ BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+ BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+ /* GPU exceptions */
+ BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+ BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+ /* MMU exceptions */
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+ BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+ BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+ /* SW defined exceptions */
+ BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+ BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,
+
+ BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+ /** End of HW fault and SW Error status codes */
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ /** Start of SW Success status codes */
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+ BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
+ BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+ /** End of SW Success status codes */
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ /** Start of Kernel-only status codes. Such codes are never returned to user-space */
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
+ BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+
+ /** End of Kernel-only status codes. */
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+} base_jd_event_code;
+
+/**
+ * @brief Event reporting structure
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. They can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
+ *
+ * Based on the event type base_jd_event::data holds:
+ * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
+ * job-chain
+ * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
+ * been completed (i.e. all contained job-chains have been completed).
+ * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
+ */
+typedef struct base_jd_event_v2 {
+ base_jd_event_code event_code; /**< event code */
+ base_atom_id atom_number; /**< the atom number that has completed */
+ struct base_jd_udata udata; /**< user data */
+} base_jd_event_v2;
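+
+/* Illustrative sketch only: classifying a reported event with the mask
+ * described above. Only SW events carry a meaningful type field.
+ */
+static inline int example_event_is_job_event(const struct base_jd_event_v2 *ev)
+{
+	if (!(ev->event_code & BASE_JD_SW_EVENT))
+		return 0;	/* HW event: no SW type bits to decode */
+	return (ev->event_code & BASE_JD_SW_EVENT_TYPE_MASK) == BASE_JD_SW_EVENT_JOB;
+}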
+
+/**
+ * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
+ * a full cache line.
+ */
+
+#define BASE_CPU_GPU_CACHE_LINE_PADDING (36)
+
+
+/**
+ * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs.
+ *
+ * This structure is stored into the memory pointed to by the @c jc field of @ref base_jd_atom.
+ *
+ * This structure must be padded to ensure that it will occupy whole cache lines. This is to avoid
+ * cases where access to pages containing the structure is shared between cached and un-cached
+ * memory regions, which would cause memory corruption. Here we set the structure size to 64 bytes,
+ * which is the cache line size for ARM A15 processors.
+ */
+
+typedef struct base_dump_cpu_gpu_counters {
+ u64 system_time;
+ u64 cycle_counter;
+ u64 sec;
+ u32 usec;
+ u8 padding[BASE_CPU_GPU_CACHE_LINE_PADDING];
+} base_dump_cpu_gpu_counters;
+
+
+
+/** @} end group base_user_api_job_dispatch */
+
+#define GPU_MAX_JOB_SLOTS 16
+
+/**
+ * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
+ *
+ * The User-side Base GPU Property Query API encapsulates two
+ * sub-modules:
+ *
+ * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
+ * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
+ *
+ * There is a related third module outside of Base, which is owned by the MIDG
+ * module:
+ * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
+ *
+ * Base only deals with properties that vary between different Midgard
+ * implementations - the Dynamic GPU properties and the Platform Config
+ * properties.
+ *
+ * For properties that are constant for the Midgard Architecture, refer to the
+ * MIDG module. However, we will discuss their relevance here <b>just to
+ * provide background information.</b>
+ *
+ * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
+ *
+ * The compile-time properties (Platform Config, Midgard Compile-time
+ * properties) are exposed as pre-processor macros.
+ *
+ * Complementing the compile-time properties are the Dynamic GPU
+ * Properties, which act as a conduit for the Midgard Configuration
+ * Discovery.
+ *
+ * In general, the dynamic properties are present to verify that the platform
+ * has been configured correctly with the right set of Platform Config
+ * Compile-time Properties.
+ *
+ * As a consistent guide across the entire DDK, the choice for dynamic or
+ * compile-time should consider the following, in order:
+ * -# Can the code be written so that it doesn't need to know the
+ * implementation limits at all?
+ * -# If you need the limits, get the information from the Dynamic Property
+ * lookup. This should be done once as you fetch the context, and then cached
+ * as part of the context data structure, so it's cheap to access.
+ * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
+ * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
+ * property). Examples of where this might be sensible follow:
+ * - Part of a critical inner-loop
+ * - Frequent re-use throughout the driver, causing significant extra load
+ * instructions or control flow that would be worthwhile optimizing out.
+ *
+ * We cannot provide an exhaustive set of examples, neither can we provide a
+ * rule for every possible situation. Use common sense, and think about: what
+ * the rest of the driver will be doing; how the compiler might represent the
+ * value if it is a compile-time constant; whether an OEM shipping multiple
+ * devices would benefit much more from a single DDK binary, instead of
+ * insignificant micro-optimizations.
+ *
+ * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
+ *
+ * Dynamic GPU properties are presented in two sets:
+ * -# the commonly used properties in @ref base_gpu_props, which have been
+ * unpacked from GPU register bitfields.
+ * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
+ * (also a member of @ref base_gpu_props). All of these are presented in
+ * the packed form, as presented by the GPU registers themselves.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ * The properties returned extend the Midgard Configuration Discovery
+ * registers. For example, GPU clock speed is not specified in the Midgard
+ * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
+ *
+ * The GPU properties are obtained by a call to
+ * _mali_base_get_gpu_props(). This simply returns a pointer to a const
+ * base_gpu_props structure. It is constant for the life of a base
+ * context. Multiple calls to _mali_base_get_gpu_props() to a base context
+ * return the same pointer to a constant structure. This avoids cache pollution
+ * of the common data.
+ *
+ * This pointer must not be freed, because it does not point to the start of a
+ * region allocated by the memory allocator; instead, just close the @ref
+ * base_context.
+ *
+ *
+ * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties
+ *
+ * The Platform Config File sets up gpu properties that are specific to a
+ * certain platform. Properties that are 'Implementation Defined' in the
+ * Midgard Architecture spec are placed here.
+ *
+ * @note Reference configurations are provided for Midgard Implementations, such as
+ * the Mali-T600 family. The customer need not repeat this information, and can select one of
+ * these reference configurations. For example, VA_BITS, PA_BITS and the
+ * maximum number of samples per pixel might vary between Midgard Implementations, but
+ * \b not for platforms using the Mali-T604. This information is placed in
+ * the reference configuration files.
+ *
+ * The System Integrator creates the following structure:
+ * - platform_XYZ
+ * - platform_XYZ/plat
+ * - platform_XYZ/plat/plat_config.h
+ *
+ * They then edit plat_config.h, using the example plat_config.h files as a
+ * guide.
+ *
+ * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will
+ * receive a helpful \#error message if they do not do this correctly. This
+ * selects the Reference Configuration for the Midgard Implementation. The rationale
+ * behind this decision (against asking the customer to write \#include
+ * <gpus/mali_t600.h> in their plat_config.h) is as follows:
+ * - This mechanism 'looks' like a regular config file (such as Linux's
+ * .config)
+ * - It is difficult to get wrong in a way that will produce strange build
+ * errors:
+ * - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and
+ * so they won't accidentally pick another file with 'mali_t600' in its name
+ * - When the build doesn't work, the System Integrator may think the DDK
+ * doesn't work, and attempt to fix it themselves:
+ * - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the
+ * error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells
+ * you.
+ * - For a \#include mechanism, checks must still be made elsewhere, which the
+ * System Integrator may try working around by setting \#defines (such as
+ * VA_BITS) themselves in their plat_config.h. In the worst case, they may
+ * set the prevention-mechanism \#define of
+ * "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN".
+ * - In this case, they would believe they are on the right track, because
+ * the build progresses with their fix, but with errors elsewhere.
+ *
+ * However, there is nothing to prevent the customer using \#include to organize
+ * their own configurations files hierarchically.
+ *
+ * The mechanism for the header file processing is as follows:
+ *
+ * @dot
+ digraph plat_config_mechanism {
+ rankdir=BT
+ size="6,6"
+
+ "mali_base.h";
+ "gpu/mali_gpu.h";
+
+ node [ shape=box ];
+ {
+ rank = same; ordering = out;
+
+ "gpu/mali_gpu_props.h";
+ "base/midg_gpus/mali_t600.h";
+ "base/midg_gpus/other_midg_gpu.h";
+ }
+ { rank = same; "plat/plat_config.h"; }
+ {
+ rank = same;
+ "gpu/mali_gpu.h" [ shape=box ];
+ gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ];
+ select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ;
+ }
+ node [ shape=box ];
+ { rank = same; "plat/plat_config.h"; }
+ { rank = same; "mali_base.h"; }
+
+ "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h";
+ "mali_base.h" -> "plat/plat_config.h" ;
+ "mali_base.h" -> select_gpu ;
+
+ "plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ;
+ gpu_chooser -> select_gpu [style="dotted,bold"] ;
+
+ select_gpu -> "base/midg_gpus/mali_t600.h" ;
+ select_gpu -> "base/midg_gpus/other_midg_gpu.h" ;
+ }
+ @enddot
+ *
+ *
+ * @section sec_base_user_api_gpuprops_kernel Kernel Operation
+ *
+ * During Base Context Create time, user-side makes a single kernel call:
+ * - A call to fill user memory with GPU information structures
+ *
+ * The kernel side will fill the provided memory with the entire processed
+ * @ref base_gpu_props structure, because this information is required on both
+ * the user and kernel sides; it does not make sense to decode it twice.
+ *
+ * Coherency groups must be derived from the bitmasks, but this can be done
+ * kernel side, and just once at kernel startup: Coherency groups must already
+ * be known kernel-side, to support chains that specify an 'Only Coherent Group'
+ * SW requirement, or 'Only Coherent Group with Tiler' SW requirement.
+ *
+ * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
+ * Creation of the coherent group data is done at device-driver startup, and so
+ * is one-time. This will most likely involve a loop with CLZ, shifting, and
+ * bit clearing on the L2_PRESENT mask, depending on whether the
+ * system is L2 Coherent. The number of shader cores is determined by a
+ * population count, since faulty cores may be disabled during production,
+ * producing a non-contiguous mask.
+ *
+ * The memory requirements for this algorithm can be determined either by a u64
+ * population count on the L2_PRESENT mask (a LUT helper already is
+ * required for the above), or the simple assumption that there can be no more than
+ * 16 coherent groups, since core groups are typically 4 cores.
+ */
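+
+/* Illustrative sketch only: the one-time derivation sketched above, counting
+ * core groups from an L2_PRESENT-style mask with a simple population count.
+ * A real implementation would also split the shader_present mask per group.
+ */
+static inline u32 example_count_core_groups(u64 l2_present)
+{
+	u32 groups = 0;
+
+	while (l2_present) {
+		l2_present &= l2_present - 1;	/* clear lowest set bit */
+		groups++;
+	}
+	return groups;
+}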
+
+/**
+ * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
+ * @{
+ */
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+struct mali_base_gpu_core_props {
+ /**
+ * Product specific value.
+ */
+ u32 product_id;
+
+ /**
+ * Status of the GPU release.
+ * No defined values, but starts at 0 and increases by one for each
+ * release status (alpha, beta, EAC, etc.).
+ * 4 bit values (0-15).
+ */
+ u16 version_status;
+
+ /**
+ * Minor release number of the GPU. "P" part of an "RnPn" release number.
+ * 8 bit values (0-255).
+ */
+ u16 minor_revision;
+
+ /**
+ * Major release number of the GPU. "R" part of an "RnPn" release number.
+ * 4 bit values (0-15).
+ */
+ u16 major_revision;
+
+ u16 padding;
+
+ /**
+	 * This property is deprecated since it does not contain the real current
+	 * value of the GPU clock speed. It is kept here only for backwards compatibility.
+	 * For the new ioctl interface, it is ignored and treated as padding
+	 * to keep the structure the same size and retain the placement of its
+ * members.
+ */
+ u32 gpu_speed_mhz;
+
+ /**
+ * @usecase GPU clock max/min speed is required for computing best/worst case
+	 * in tasks such as job scheduling and irq_throttling. (It is not specified in the
+ * Midgard Architecture).
+ * Also, GPU clock max speed is used for OpenCL's clGetDeviceInfo() function.
+ */
+ u32 gpu_freq_khz_max;
+ u32 gpu_freq_khz_min;
+
+ /**
+ * Size of the shader program counter, in bits.
+ */
+ u32 log2_program_counter_size;
+
+ /**
+ * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
+ * bitpattern where a set bit indicates that the format is supported.
+ *
+ * Before using a texture format, it is recommended that the corresponding
+ * bit be checked.
+ */
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+ /**
+ * Theoretical maximum memory available to the GPU. It is unlikely that a
+ * client will be able to allocate all of this memory for their own
+ * purposes, but this at least provides an upper bound on the memory
+ * available to the GPU.
+ *
+ * This is required for OpenCL's clGetDeviceInfo() call when
+ * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
+ * client will not be expecting to allocate anywhere near this value.
+ */
+ u64 gpu_available_memory_size;
+};
+
+/**
+ *
+ * More information is possible - but associativity and bus width are not
+ * required by upper-level APIs.
+ */
+struct mali_base_gpu_l2_cache_props {
+ u8 log2_line_size;
+ u8 log2_cache_size;
+ u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
+ u8 padding[5];
+};
+
+struct mali_base_gpu_tiler_props {
+ u32 bin_size_bytes; /* Max is 4*2^15 */
+ u32 max_active_levels; /* Max is 2^15 */
+};
+
+/**
+ * GPU threading system details.
+ */
+struct mali_base_gpu_thread_props {
+ u32 max_threads; /* Max. number of threads per core */
+ u32 max_workgroup_size; /* Max. number of threads per workgroup */
+ u32 max_barrier_size; /* Max. number of threads that can synchronize on a simple barrier */
+ u16 max_registers; /* Total size [1..65535] of the register file available per core. */
+ u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
+ u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
+ u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
+ u8 padding[7];
+};
+
+/**
+ * @brief descriptor for a coherent group
+ *
+ * \c core_mask exposes all cores in that coherent group, and \c num_cores
+ * provides a cached population-count for that mask.
+ *
+ * @note Whilst all cores are exposed in the mask, not all may be available to
+ * the application, depending on the Kernel Power policy.
+ *
+ * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
+ */
+struct mali_base_gpu_coherent_group {
+ u64 core_mask; /**< Core restriction mask required for the group */
+ u16 num_cores; /**< Number of cores in the group */
+ u16 padding[3];
+};
+
+/**
+ * @brief Coherency group information
+ *
+ * Note that the sizes of the members could be reduced. However, the \c group
+ * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
+ * aligned, thus leading to wastage if the other members sizes were reduced.
+ *
+ * The groups are sorted by core mask. The core masks are non-repeating and do
+ * not intersect.
+ */
+struct mali_base_gpu_coherent_group_info {
+ u32 num_groups;
+
+ /**
+ * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
+ *
+ * The GPU Counter dumping writes 2048 bytes per core group, regardless of
+ * whether the core groups are coherent or not. Hence this member is needed
+ * to calculate how much memory is required for dumping.
+ *
+ * @note Do not use it to work out how many valid elements are in the
+ * group[] member. Use num_groups instead.
+ */
+ u32 num_core_groups;
+
+ /**
+ * Coherency features of the memory, accessed by @ref gpu_mem_features
+ * methods
+ */
+ u32 coherency;
+
+ u32 padding;
+
+ /**
+ * Descriptors of coherent groups
+ */
+ struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
+};
+
+/**
+ * A complete description of the GPU's Hardware Configuration Discovery
+ * registers.
+ *
+ * The information is presented inefficiently for access. For frequent access,
+ * the values should be better expressed in an unpacked form in the
+ * base_gpu_props structure.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ */
+struct gpu_raw_gpu_props {
+ u64 shader_present;
+ u64 tiler_present;
+ u64 l2_present;
+ u64 stack_present;
+
+ u32 l2_features;
+ u32 suspend_size; /* API 8.2+ */
+ u32 mem_features;
+ u32 mmu_features;
+
+ u32 as_present;
+
+ u32 js_present;
+ u32 js_features[GPU_MAX_JOB_SLOTS];
+ u32 tiler_features;
+ u32 texture_features[3];
+
+ u32 gpu_id;
+
+ u32 thread_max_threads;
+ u32 thread_max_workgroup_size;
+ u32 thread_max_barrier_size;
+ u32 thread_features;
+
+ /*
+ * Note: This is the _selected_ coherency mode rather than the
+ * available modes as exposed in the coherency_features register.
+ */
+ u32 coherency_mode;
+};
+
+/**
+ * Return structure for _mali_base_get_gpu_props().
+ *
+ * NOTE: the raw_props member in this data structure contains the register
+ * values from which the value of the other members are derived. The derived
+ * members exist to allow for efficient access and/or shielding the details
+ * of the layout of the registers.
+ *
+ */
+typedef struct mali_base_gpu_props {
+ struct mali_base_gpu_core_props core_props;
+ struct mali_base_gpu_l2_cache_props l2_props;
+ u64 unused_1; /* keep for backwards compatibility */
+ struct mali_base_gpu_tiler_props tiler_props;
+ struct mali_base_gpu_thread_props thread_props;
+
+ /** This member is large, likely to be 128 bytes */
+ struct gpu_raw_gpu_props raw_props;
+
+ /** This must be last member of the structure */
+ struct mali_base_gpu_coherent_group_info coherency_info;
+} base_gpu_props;
+
+/** @} end group base_user_api_gpuprops_dyn */
+
+/** @} end group base_user_api_gpuprops */
+
+/**
+ * @addtogroup base_user_api_core User-side Base core APIs
+ * @{
+ */
+
+/**
+ * \enum base_context_create_flags
+ *
+ * Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+enum base_context_create_flags {
+ /** No flags set */
+ BASE_CONTEXT_CREATE_FLAG_NONE = 0,
+
+ /** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */
+ BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0),
+
+ /** Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled. */
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
+};
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init()
+ */
+#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
+ */
+#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
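+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical caller could
+ * OR ::base_context_create_flags together and validate them against the
+ * allowed mask before passing them to base_context_init(). Variable names
+ * here are examples only.
+ *
+ *   u32 flags = BASE_CONTEXT_CREATE_FLAG_NONE;
+ *
+ *   flags |= BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED;
+ *
+ *   if (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS)
+ *           return -EINVAL;   // reject private/unknown bits
+ */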
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as @ref base_context_create_flags, and so must
+ * not collide with them.
+ */
+/** Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31))
+
+/** @} end group base_user_api_core */
+
+/** @} end group base_user_api */
+
+/**
+ * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
+ * @{
+ *
+ * C pre-processor macros relating to the Platform Config are exposed here.
+ *
+ * These include:
+ * - GPU Properties that are constant on a particular Midgard Family
+ * Implementation e.g. Maximum samples per pixel on Mali-T600.
+ * - General platform config for the GPU, such as the GPU major and minor
+ * revision.
+ */
+
+/** @} end group base_plat_config_gpuprops */
+
+/**
+ * @addtogroup base_api Base APIs
+ * @{
+ */
+
+/**
+ * @brief The payload for a replay job. This must be in GPU memory.
+ */
+typedef struct base_jd_replay_payload {
+ /**
+ * Pointer to the first entry in the base_jd_replay_jc list. These
+ * will be replayed in @b reverse order (so that extra ones can be added
+ * to the head in future soft jobs without affecting this soft job)
+ */
+ u64 tiler_jc_list;
+
+ /**
+ * Pointer to the fragment job chain.
+ */
+ u64 fragment_jc;
+
+ /**
+ * Pointer to the tiler heap free FBD field to be modified.
+ */
+ u64 tiler_heap_free;
+
+ /**
+ * Hierarchy mask for the replayed fragment jobs. May be zero.
+ */
+ u16 fragment_hierarchy_mask;
+
+ /**
+ * Hierarchy mask for the replayed tiler jobs. May be zero.
+ */
+ u16 tiler_hierarchy_mask;
+
+ /**
+ * Default weight to be used for hierarchy levels not in the original
+ * mask.
+ */
+ u32 hierarchy_default_weight;
+
+ /**
+ * Core requirements for the tiler job chain
+ */
+ base_jd_core_req tiler_core_req;
+
+ /**
+ * Core requirements for the fragment job chain
+ */
+ base_jd_core_req fragment_core_req;
+} base_jd_replay_payload;
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+typedef struct base_jd_replay_payload_uk10_2 {
+ u64 tiler_jc_list;
+ u64 fragment_jc;
+ u64 tiler_heap_free;
+ u16 fragment_hierarchy_mask;
+ u16 tiler_hierarchy_mask;
+ u32 hierarchy_default_weight;
+ u16 tiler_core_req;
+ u16 fragment_core_req;
+ u8 padding[4];
+} base_jd_replay_payload_uk10_2;
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+/**
+ * @brief An entry in the linked list of job chains to be replayed. This must
+ * be in GPU memory.
+ */
+typedef struct base_jd_replay_jc {
+ /**
+ * Pointer to next entry in the list. A setting of NULL indicates the
+ * end of the list.
+ */
+ u64 next;
+
+ /**
+ * Pointer to the job chain.
+ */
+ u64 jc;
+
+} base_jd_replay_jc;
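+
+/*
+ * Illustrative sketch (not part of the driver): because tiler_jc_list is
+ * replayed in reverse order, a later soft job can prepend a new entry without
+ * disturbing the chains already referenced by an existing payload. Names are
+ * placeholders; new_entry is a CPU mapping of a base_jd_replay_jc that lives
+ * in GPU memory at new_entry_gpu_va.
+ *
+ *   new_entry->jc   = new_chain_gpu_va;
+ *   new_entry->next = payload->tiler_jc_list;   // old head becomes second
+ *   payload->tiler_jc_list = new_entry_gpu_va;  // new entry is the new head
+ */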
+
+/* Maximum number of jobs allowed in a fragment chain in the payload of a
+ * replay job */
+#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256
+
+/** @} end group base_api */
+
+typedef struct base_profiling_controls {
+ u32 profiling_controls[FBDUMP_CONTROL_MAX];
+} base_profiling_controls;
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact. */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+ BASE_TLSTREAM_JOB_DUMPING_ENABLED)
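+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical consumer of
+ * timeline-stream flags would reject anything outside the mask above.
+ *
+ *   if (flags & ~BASE_TLSTREAM_FLAGS_MASK)
+ *           return -EINVAL;
+ */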
+
+#endif /* _BASE_KERNEL_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_base_mem_priv.h b/drivers/gpu/arm_gpu/mali_base_mem_priv.h
new file mode 100644
index 000000000000..4a98a72cc37a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_base_mem_priv.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _BASE_MEM_PRIV_H_
+#define _BASE_MEM_PRIV_H_
+
+#define BASE_SYNCSET_OP_MSYNC (1U << 0)
+#define BASE_SYNCSET_OP_CSYNC (1U << 1)
+
+/*
+ * This structure describes a basic memory coherency operation.
+ * It can either be:
+ * @li a sync from CPU to Memory:
+ * - type = ::BASE_SYNCSET_OP_MSYNC
+ * - mem_handle = a handle to the memory object on which the operation
+ * is taking place
+ * - user_addr = the address of the range to be synced
+ * - size = the amount of data to be synced, in bytes
+ * - offset is ignored.
+ * @li a sync from Memory to CPU:
+ * - type = ::BASE_SYNCSET_OP_CSYNC
+ * - mem_handle = a handle to the memory object on which the operation
+ * is taking place
+ * - user_addr = the address of the range to be synced
+ * - size = the amount of data to be synced, in bytes.
+ * - offset is ignored.
+ */
+struct basep_syncset {
+ base_mem_handle mem_handle;
+ u64 user_addr;
+ u64 size;
+ u8 type;
+ u8 padding[7];
+};
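+
+/*
+ * Illustrative sketch (not part of the driver): filling in a basep_syncset
+ * for a CPU-to-Memory sync as described above. The handle, buffer and length
+ * used here are placeholders.
+ *
+ *   struct basep_syncset sset = {
+ *           .mem_handle = handle,                 // memory object handle
+ *           .user_addr  = (u64)(uintptr_t)buf,    // start of range to sync
+ *           .size       = length,                 // bytes to sync
+ *           .type       = BASE_SYNCSET_OP_MSYNC,  // CPU -> Memory
+ *   };
+ */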
+
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_base_vendor_specific_func.h b/drivers/gpu/arm_gpu/mali_base_vendor_specific_func.h
new file mode 100644
index 000000000000..be454a216a39
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_base_vendor_specific_func.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#ifndef _BASE_VENDOR_SPEC_FUNC_H_
+#define _BASE_VENDOR_SPEC_FUNC_H_
+
+int kbase_get_vendor_specific_cpu_clock_speed(u32 * const);
+
+#endif /*_BASE_VENDOR_SPEC_FUNC_H_*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase.h b/drivers/gpu/arm_gpu/mali_kbase.h
new file mode 100644
index 000000000000..d77f186956f0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase.h
@@ -0,0 +1,613 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_H_
+#define _KBASE_H_
+
+#include <mali_malisw.h>
+
+#include <mali_kbase_debug.h>
+
+#include <asm/page.h>
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "mali_base_kernel.h"
+#include <mali_kbase_uku.h>
+#include <mali_kbase_linux.h>
+
+/*
+ * Include mali_kbase_defs.h first as this provides types needed by other local
+ * header files.
+ */
+#include "mali_kbase_defs.h"
+
+#include "mali_kbase_context.h"
+#include "mali_kbase_strings.h"
+#include "mali_kbase_mem_lowlevel.h"
+#include "mali_kbase_trace_timeline.h"
+#include "mali_kbase_js.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_utility.h"
+#include "mali_kbase_gpu_memory_debugfs.h"
+#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_debug_job_fault.h"
+#include "mali_kbase_jd_debugfs.h"
+#include "mali_kbase_gpuprops.h"
+#include "mali_kbase_jm.h"
+#include "mali_kbase_vinstr.h"
+
+#include "ipa/mali_kbase_ipa.h"
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+#include <trace/events/gpu.h>
+#endif
+
+#ifndef u64_to_user_ptr
+/* Introduced in Linux v4.6 */
+#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))
+#endif
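+
+/*
+ * Illustrative sketch (not part of the driver): a u64 address received from
+ * userspace can be turned back into a __user pointer before use with the
+ * usual copy helpers. Names are placeholders.
+ *
+ *   void __user *uptr = u64_to_user_ptr(user_supplied_addr);
+ *
+ *   if (copy_from_user(&local, uptr, sizeof(local)))
+ *           return -EFAULT;
+ */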
+
+/*
+ * Kernel-side Base (KBase) APIs
+ */
+
+struct kbase_device *kbase_device_alloc(void);
+/*
+ * Note: the configuration attributes member of kbdev needs to have
+ * been set up before calling kbase_device_init().
+ */
+
+/*
+* API to acquire device list semaphore and return pointer
+* to the device list head
+*/
+const struct list_head *kbase_dev_list_get(void);
+/* API to release the device list semaphore */
+void kbase_dev_list_put(const struct list_head *dev_list);
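+
+/*
+ * Illustrative sketch (not part of the driver): the usual get/iterate/put
+ * pattern for walking the device list with these helpers, mirroring the
+ * debugfs code elsewhere in this patch.
+ *
+ *   const struct list_head *kbdev_list = kbase_dev_list_get();
+ *   struct list_head *entry;
+ *
+ *   list_for_each(entry, kbdev_list) {
+ *           struct kbase_device *kbdev =
+ *                   list_entry(entry, struct kbase_device, entry);
+ *           // inspect kbdev while the list semaphore is held
+ *   }
+ *
+ *   kbase_dev_list_put(kbdev_list);
+ */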
+
+int kbase_device_init(struct kbase_device * const kbdev);
+void kbase_device_term(struct kbase_device *kbdev);
+void kbase_device_free(struct kbase_device *kbdev);
+int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
+
+/* Needed for gator integration and for reporting vsync information */
+struct kbase_device *kbase_find_device(int minor);
+void kbase_release_device(struct kbase_device *kbdev);
+
+void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
+
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+void kbase_destroy_context(struct kbase_context *kctx);
+
+int kbase_jd_init(struct kbase_context *kctx);
+void kbase_jd_exit(struct kbase_context *kctx);
+
+/**
+ * kbase_jd_submit - Submit atoms to the job dispatcher
+ *
+ * @kctx: The kbase context to submit to
+ * @user_addr: The address in user space of the struct base_jd_atom_v2 array
+ * @nr_atoms: The number of atoms in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_jd_submit(struct kbase_context *kctx,
+ void __user *user_addr, u32 nr_atoms, u32 stride,
+ bool uk6_atom);
+
+/**
+ * kbase_jd_done_worker - Handle a job completion
+ * @data: a &struct work_struct
+ *
+ * This function requeues the job from the runpool (if it was soft-stopped or
+ * removed from NEXT registers).
+ *
+ * Removes it from the system if it finished/failed/was cancelled.
+ *
+ * Resolves dependencies to add dependent jobs to the context, potentially
+ * starting them if necessary (which may add more references to the context)
+ *
+ * Releases the reference to the context from the no-longer-running job.
+ *
+ * Handles retrying submission outside of IRQ context if it failed from within
+ * IRQ context.
+ */
+void kbase_jd_done_worker(struct work_struct *data);
+
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code);
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbase_jd_zap_context(struct kbase_context *kctx);
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+ struct list_head *completed_jobs_ctx);
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
+bool jd_submit_atom(struct kbase_context *kctx,
+ const struct base_jd_atom_v2 *user_atom,
+ struct kbase_jd_atom *katom);
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done);
+
+/**
+ * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms
+ * and soft stop them
+ * @kctx: Pointer to context to check.
+ * @katom: Pointer to priority atom.
+ *
+ * Atoms from @kctx on the same job slot as @katom, which have lower priority
+ * than @katom will be soft stopped and put back in the queue, so that atoms
+ * with higher priority can run.
+ *
+ * The hwaccess_lock must be held when calling this function.
+ */
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom);
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom, u32 sw_flags);
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom);
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+ base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom);
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
+int kbase_event_pending(struct kbase_context *ctx);
+int kbase_event_init(struct kbase_context *kctx);
+void kbase_event_close(struct kbase_context *kctx);
+void kbase_event_cleanup(struct kbase_context *kctx);
+void kbase_event_wakeup(struct kbase_context *kctx);
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom);
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
+void kbase_finish_soft_job(struct kbase_jd_atom *katom);
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
+#endif
+int kbase_soft_event_update(struct kbase_context *kctx,
+ u64 event,
+ unsigned char new_status);
+
+bool kbase_replay_process(struct kbase_jd_atom *katom);
+
+void kbasep_soft_job_timeout_worker(unsigned long data);
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
+
+/* api used internally for register access. Contains validation and tracing */
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
+int kbase_device_trace_buffer_install(
+ struct kbase_context *kctx, u32 *tb, size_t size);
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
+
+/* api to be ported per OS, only need to do the raw register access */
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
+
+void kbasep_as_do_poke(struct work_struct *work);
+
+/** Returns the name associated with a Mali exception code
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
+ *
+ * @param[in] kbdev The kbase device that the GPU fault occurred from.
+ * @param[in] exception_code exception code
+ * @return name associated with the exception code
+ */
+const char *kbase_exception_name(struct kbase_device *kbdev,
+ u32 exception_code);
+
+/**
+ * Check whether a system suspend is in progress, or the system has already been suspended
+ *
+ * The caller should ensure that either kbdev->pm.active_count_lock is held, or
+ * a dmb was executed recently (to ensure the value is most
+ * up-to-date). However, without a lock the value could change afterwards.
+ *
+ * @return false if a suspend is not in progress
+ * @return !=false otherwise
+ */
+static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
+{
+ return kbdev->pm.suspending;
+}
+
+/**
+ * Return the atom's ID, as was originally supplied by userspace in
+ * base_jd_atom_v2::atom_number
+ */
+static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int result;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->kctx == kctx);
+
+ result = katom - &kctx->jctx.atoms[0];
+ KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT);
+ return result;
+}
+
+/**
+ * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
+ * @kctx: Context pointer
+ * @id: ID of atom to retrieve
+ *
+ * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
+ */
+static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
+ struct kbase_context *kctx, int id)
+{
+ return &kctx->jctx.atoms[id];
+}
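+
+/*
+ * Illustrative sketch (not part of the driver): for atoms belonging to @kctx,
+ * kbase_jd_atom_id() and kbase_jd_atom_from_id() are inverses, so a stored ID
+ * can be mapped back to its atom.
+ *
+ *   int id = kbase_jd_atom_id(kctx, katom);
+ *   struct kbase_jd_atom *same = kbase_jd_atom_from_id(kctx, id);
+ *   // same == katom
+ */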
+
+/**
+ * Initialize the disjoint state
+ *
+ * The disjoint event count and state are both set to zero.
+ *
+ * Disjoint functions usage:
+ *
+ * The disjoint event count should be incremented whenever a disjoint event occurs.
+ *
+ * There are several cases which are regarded as disjoint behavior. Rather than just increment
+ * the counter during disjoint events we also increment the counter when jobs may be affected
+ * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
+ *
+ * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
+ * (as part of the replay workaround). Increasing the disjoint state also increases the count of
+ * disjoint events.
+ *
+ * The disjoint state is then used to increase the count of disjoint events during job submission
+ * and job completion. Any atom submitted or completed while the disjoint state is greater than
+ * zero is regarded as a disjoint event.
+ *
+ * The disjoint event counter is also incremented immediately whenever a job is soft stopped
+ * and during context creation.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_init(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events.
+ * Called when a disjoint event has happened.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events only if the GPU is in a disjoint state
+ *
+ * This should be called when something happens which could be disjoint if the GPU
+ * is in a disjoint state. The state refcount keeps track of this.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev);
+
+/**
+ * Returns the count of disjoint events
+ *
+ * @param kbdev The kbase device
+ * @return the count of disjoint events
+ */
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
+
+/**
+ * Increment the refcount state indicating that the GPU is in a disjoint state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ * Once the disjoint state has completed, @ref kbase_disjoint_state_down
+ * should be called.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_up(struct kbase_device *kbdev);
+
+/**
+ * Decrement the refcount state
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ *
+ * Called after @ref kbase_disjoint_state_up once the disjoint state is over
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_down(struct kbase_device *kbdev);
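+
+/*
+ * Illustrative sketch (not part of the driver): the state-based accounting
+ * described above brackets a disjoint period with up/down calls, and events
+ * inside the bracket are counted via kbase_disjoint_event_potential().
+ *
+ *   kbase_disjoint_state_up(kbdev);      // e.g. entering GPU reset
+ *   // ... work during which submissions/completions count as disjoint ...
+ *   kbase_disjoint_event_potential(kbdev);
+ *   kbase_disjoint_state_down(kbdev);    // disjoint period over
+ */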
+
+/**
+ * If a job is soft stopped and the number of contexts is >= this value
+ * it is reported as a disjoint event
+ */
+#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
+
+#if !defined(UINT64_MAX)
+ #define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#endif
+
+#if KBASE_TRACE_ENABLE
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
+
+#ifndef CONFIG_MALI_SYSTEM_TRACE
+/** Add trace values about a job-slot
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
+
+/** Add trace values about a job-slot, with info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)
+
+/** Add trace values about a ctx refcount
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
+/** Add trace values about a ctx refcount, and info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)
+
+/** Add trace values (no slot or refcount)
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ 0, 0, 0, info_val)
+
+/** Clear the trace */
+#define KBASE_TRACE_CLEAR(kbdev) \
+ kbasep_trace_clear(kbdev)
+
+/** Dump the slot trace */
+#define KBASE_TRACE_DUMP(kbdev) \
+ kbasep_trace_dump(kbdev)
+
+/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
+/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
+void kbasep_trace_clear(struct kbase_device *kbdev);
+#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+/* Dispatch kbase trace events as system trace events */
+#include <mali_linux_kbase_trace.h>
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+ trace_mali_##code(jobslot, 0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+ trace_mali_##code(jobslot, info_val)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+ trace_mali_##code(refcount, 0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+ trace_mali_##code(refcount, info_val)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
+ trace_mali_##code(gpu_addr, info_val)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+#else
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(jobslot);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(jobslot);\
+ CSTD_UNUSED(info_val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(refcount);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(info_val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(subcode);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#endif /* KBASE_TRACE_ENABLE */
+/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
+void kbasep_trace_dump(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_DEBUG
+/**
+ * kbase_set_driver_inactive - Force driver to go inactive
+ * @kbdev: Device pointer
+ * @inactive: true if driver should go inactive, false otherwise
+ *
+ * Forcing the driver inactive will cause all future IOCTLs to wait until the
+ * driver is made active again. This is intended solely for the use of tests
+ * which require that no jobs are running while the test executes.
+ */
+void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive);
+#endif /* CONFIG_MALI_DEBUG */
+
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/* kbase_io_history_init - initialize data struct for register access history
+ *
+ * @h The register history to initialize
+ * @n The number of register accesses that the buffer could hold
+ *
+ * @return 0 if successfully initialized, failure otherwise
+ */
+int kbase_io_history_init(struct kbase_io_history *h, u16 n);
+
+/* kbase_io_history_term - uninit all resources for the register access history
+ *
+ * @h The register history to terminate
+ */
+void kbase_io_history_term(struct kbase_io_history *h);
+
+/* kbase_io_history_dump - print the register history to the kernel ring buffer
+ *
+ * @kbdev Pointer to kbase_device containing the register history to dump
+ */
+void kbase_io_history_dump(struct kbase_device *kbdev);
+
+/**
+ * kbase_io_history_resize - resize the register access history buffer.
+ *
+ * @h: Pointer to a valid register history to resize
+ * @new_size: Number of accesses the buffer could hold
+ *
+ * A successful resize will clear all recent register accesses.
+ * If resizing fails for any reason (e.g., could not allocate memory, invalid
+ * buffer size) then the original buffer will be kept intact.
+ *
+ * @return 0 if the buffer was resized, failure otherwise
+ */
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbase_io_history_init(...) ((int)0)
+
+#define kbase_io_history_term CSTD_NOP
+
+#define kbase_io_history_dump CSTD_NOP
+
+#define kbase_io_history_resize CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+#endif
+
+
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.c b/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.c
new file mode 100644
index 000000000000..6b3559d93351
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.c
@@ -0,0 +1,210 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_kbase_10969_workaround.h>
+
+/* This function is used to solve an HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can
+ * happen that the restart index is out of bounds and the rerun causes a tile
+ * range fault. If this happens we try to clamp the restart index to a correct
+ * value and rerun the job.
+ */
+/* Mask of X and Y coordinates for the coordinate words in the descriptors */
+#define X_COORDINATE_MASK 0x00000FFF
+#define Y_COORDINATE_MASK 0x0FFF0000
+/* Max number of words needed from the fragment shader job descriptor */
+#define JOB_HEADER_SIZE_IN_WORDS 10
+#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32))
+
+/* Word 0: Status Word */
+#define JOB_DESC_STATUS_WORD 0
+/* Word 1: Restart Index */
+#define JOB_DESC_RESTART_INDEX_WORD 1
+/* Word 2: Fault address low word */
+#define JOB_DESC_FAULT_ADDR_LOW_WORD 2
+/* Word 8: Minimum Tile Coordinates */
+#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8
+/* Word 9: Maximum Tile Coordinates */
+#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
+{
+ struct device *dev = katom->kctx->kbdev->dev;
+ u32 clamped = 0;
+ struct kbase_va_region *region;
+ struct tagged_addr *page_array;
+ u64 page_index;
+ u32 offset = katom->jc & (~PAGE_MASK);
+ u32 *page_1 = NULL;
+ u32 *page_2 = NULL;
+ u32 job_header[JOB_HEADER_SIZE_IN_WORDS];
+ void *dst = job_header;
+ u32 minX, minY, maxX, maxY;
+ u32 restartX, restartY;
+ struct page *p;
+ u32 copy_size;
+
+ dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
+ if (!(katom->core_req & BASE_JD_REQ_FS))
+ return 0;
+
+ kbase_gpu_vm_lock(katom->kctx);
+ region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+ katom->jc);
+ if (!region || (region->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ page_array = kbase_get_cpu_phy_pages(region);
+ if (!page_array)
+ goto out_unlock;
+
+ page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+
+ p = phys_to_page(as_phys_addr_t(page_array[page_index]));
+
+ /* We need the first 10 words of the fragment shader job descriptor.
+ * Check that offset + 10 words is less than the page size; otherwise
+ * the descriptor spills into the next page and that page must be
+ * mapped as well. copy_size holds how many bytes of the descriptor
+ * fit in the first page.
+ */
+ copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
+
+ page_1 = kmap_atomic(p);
+
+ /* page_1 is a u32 pointer, offset is expressed in bytes */
+ page_1 += offset>>2;
+
+ kbase_sync_single_for_cpu(katom->kctx->kbdev,
+ kbase_dma_addr(p) + offset,
+ copy_size, DMA_BIDIRECTIONAL);
+
+ memcpy(dst, page_1, copy_size);
+
+ /* The data needed overflows the page boundary,
+ * so the subsequent page must be mapped as well */
+ if (copy_size < JOB_HEADER_SIZE) {
+ p = phys_to_page(as_phys_addr_t(page_array[page_index + 1]));
+ page_2 = kmap_atomic(p);
+
+ kbase_sync_single_for_cpu(katom->kctx->kbdev,
+ kbase_dma_addr(p),
+ JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
+
+ memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+ }
+
+ /* We managed to correctly map one or two pages (in case of overflow) */
+ /* Get Bounding Box data and restart index from fault address low word */
+ minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+ restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+ dev_warn(dev, "Before Clamping:\n"
+ "Jobstatus: %08x\n"
+ "restartIdx: %08x\n"
+ "Fault_addr_low: %08x\n"
+ "minCoordsX: %08x minCoordsY: %08x\n"
+ "maxCoordsX: %08x maxCoordsY: %08x\n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX, minY,
+ maxX, maxY);
+
+ /* Set the restart index to the one which generated the fault*/
+ job_header[JOB_DESC_RESTART_INDEX_WORD] =
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
+
+ if (restartX < minX) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
+ dev_warn(dev,
+ "Clamping restart X index to minimum. %08x clamped to %08x\n",
+ restartX, minX);
+ clamped = 1;
+ }
+ if (restartY < minY) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
+ dev_warn(dev,
+ "Clamping restart Y index to minimum. %08x clamped to %08x\n",
+ restartY, minY);
+ clamped = 1;
+ }
+ if (restartX > maxX) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
+ dev_warn(dev,
+ "Clamping restart X index to maximum. %08x clamped to %08x\n",
+ restartX, maxX);
+ clamped = 1;
+ }
+ if (restartY > maxY) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
+ dev_warn(dev,
+ "Clamping restart Y index to maximum. %08x clamped to %08x\n",
+ restartY, maxY);
+ clamped = 1;
+ }
+
+ if (clamped) {
+ /* Reset the fault address low word
+ * and set the job status to STOPPED */
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+ job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+ dev_warn(dev, "After Clamping:\n"
+ "Jobstatus: %08x\n"
+ "restartIdx: %08x\n"
+ "Fault_addr_low: %08x\n"
+ "minCoordsX: %08x minCoordsY: %08x\n"
+ "maxCoordsX: %08x maxCoordsY: %08x\n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX, minY,
+ maxX, maxY);
+
+ /* Flush CPU cache to update memory for future GPU reads*/
+ memcpy(page_1, dst, copy_size);
+ p = phys_to_page(as_phys_addr_t(page_array[page_index]));
+
+ kbase_sync_single_for_device(katom->kctx->kbdev,
+ kbase_dma_addr(p) + offset,
+ copy_size, DMA_TO_DEVICE);
+
+ if (copy_size < JOB_HEADER_SIZE) {
+ memcpy(page_2, dst + copy_size,
+ JOB_HEADER_SIZE - copy_size);
+ p = phys_to_page(as_phys_addr_t(page_array[page_index +
+ 1]));
+
+ kbase_sync_single_for_device(katom->kctx->kbdev,
+ kbase_dma_addr(p),
+ JOB_HEADER_SIZE - copy_size,
+ DMA_TO_DEVICE);
+ }
+ }
+ if (copy_size < JOB_HEADER_SIZE)
+ kunmap_atomic(page_2);
+
+ kunmap_atomic(page_1);
+
+out_unlock:
+ kbase_gpu_vm_unlock(katom->kctx);
+ return clamped;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.h b/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.h
new file mode 100644
index 000000000000..099a29861672
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_10969_workaround.h
@@ -0,0 +1,23 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_10969_WORKAROUND_
+#define _KBASE_10969_WORKAROUND_
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
+
+#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.c
new file mode 100644
index 000000000000..f910fe970feb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+
+static int kbase_as_fault_read(struct seq_file *sfile, void *data)
+{
+ uintptr_t as_no = (uintptr_t) sfile->private;
+
+ struct list_head *entry;
+ const struct list_head *kbdev_list;
+ struct kbase_device *kbdev = NULL;
+
+ kbdev_list = kbase_dev_list_get();
+
+ list_for_each(entry, kbdev_list) {
+ kbdev = list_entry(entry, struct kbase_device, entry);
+
+ if (kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
+ /* don't show this one again until another fault occurs */
+ kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no);
+
+ /* output the last page fault addr */
+ seq_printf(sfile, "%llu\n", (u64) kbdev->as[as_no].fault_addr);
+ }
+
+ }
+
+ kbase_dev_list_put(kbdev_list);
+
+ return 0;
+}
+
+static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbase_as_fault_read, in->i_private);
+}
+
+static const struct file_operations as_fault_fops = {
+ .open = kbase_as_fault_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Initialize debugfs entry for each address space
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+ uint i;
+ char as_name[64];
+ struct dentry *debugfs_directory;
+
+ kbdev->debugfs_as_read_bitmap = 0ULL;
+
+ KBASE_DEBUG_ASSERT(kbdev->nr_hw_address_spaces);
+ KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].fault_addr) == sizeof(u64));
+
+ debugfs_directory = debugfs_create_dir("address_spaces",
+ kbdev->mali_debugfs_directory);
+
+ if (debugfs_directory) {
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i);
+ debugfs_create_file(as_name, S_IRUGO,
+ debugfs_directory, (void *)((uintptr_t)i), &as_fault_fops);
+ }
+ } else {
+ dev_warn(kbdev->dev, "unable to create address_spaces debugfs directory");
+ }
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+ return;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.h
new file mode 100644
index 000000000000..3ed2248897fc
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_as_fault_debugfs.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_AS_FAULT_DEBUG_FS_H
+#define _KBASE_AS_FAULT_DEBUG_FS_H
+
+/**
+ * kbase_as_fault_debugfs_init() - Add debugfs files for reporting page faults
+ *
+ * @kbdev: Pointer to kbase_device
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_as_fault_debugfs_new() - make the last fault available on debugfs
+ *
+ * @kbdev: Pointer to kbase_device
+ * @as_no: The address space the fault occurred on
+ */
+static inline void
+kbase_as_fault_debugfs_new(struct kbase_device *kbdev, int as_no)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+ kbdev->debugfs_as_read_bitmap |= (1ULL << as_no);
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+ return;
+}
+
+#endif /*_KBASE_AS_FAULT_DEBUG_FS_H*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase_cache_policy.c b/drivers/gpu/arm_gpu/mali_kbase_cache_policy.c
new file mode 100644
index 000000000000..1d11de67aa80
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_cache_policy.c
@@ -0,0 +1,54 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#include "mali_kbase_cache_policy.h"
+
+/*
+ * The output flags should be a combination of the following values:
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled.
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
+{
+ u32 cache_flags = 0;
+
+ CSTD_UNUSED(nr_pages);
+
+ if (flags & BASE_MEM_CACHED_CPU)
+ cache_flags |= KBASE_REG_CPU_CACHED;
+
+ return cache_flags;
+}
+
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(kbdev->dev, handle, size, dir);
+}
+
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(kbdev->dev, handle, size, dir);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_cache_policy.h b/drivers/gpu/arm_gpu/mali_kbase_cache_policy.h
new file mode 100644
index 000000000000..0c18bdb357b0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_cache_policy.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#ifndef _KBASE_CACHE_POLICY_H_
+#define _KBASE_CACHE_POLICY_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_enabled - Choose the cache policy for a specific region
+ * @flags: flags describing attributes of the region
+ * @nr_pages: total number of pages (backed or not) for the region
+ *
+ * Tells whether the CPU and GPU caches should be enabled or not for a specific
+ * region.
+ * This function can be modified to customize the cache policy depending on the
+ * flags and size of the region.
+ *
+ * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED
+ * depending on the cache policy
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages);
+
+#endif /* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_config.c b/drivers/gpu/arm_gpu/mali_kbase_config.c
new file mode 100644
index 000000000000..fb615ae02ead
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_config.c
@@ -0,0 +1,51 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+int kbasep_platform_device_init(struct kbase_device *kbdev)
+{
+ struct kbase_platform_funcs_conf *platform_funcs_p;
+
+ platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+ if (platform_funcs_p && platform_funcs_p->platform_init_func)
+ return platform_funcs_p->platform_init_func(kbdev);
+
+ return 0;
+}
+
+void kbasep_platform_device_term(struct kbase_device *kbdev)
+{
+ struct kbase_platform_funcs_conf *platform_funcs_p;
+
+ platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+ if (platform_funcs_p && platform_funcs_p->platform_term_func)
+ platform_funcs_p->platform_term_func(kbdev);
+}
+
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
+{
+ KBASE_DEBUG_ASSERT(NULL != clock_speed);
+
+ *clock_speed = 100;
+ return 0;
+}
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_config.h b/drivers/gpu/arm_gpu/mali_kbase_config.h
new file mode 100644
index 000000000000..356d52bcd774
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_config.h
@@ -0,0 +1,345 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_config.h
+ * Configuration API and Attributes for KBase
+ */
+
+#ifndef _KBASE_CONFIG_H_
+#define _KBASE_CONFIG_H_
+
+#include <asm/page.h>
+
+#include <mali_malisw.h>
+#include <mali_kbase_backend_config.h>
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_config Configuration API and Attributes
+ * @{
+ */
+
+#include <linux/rbtree.h>
+
+/* Forward declaration of struct kbase_device */
+struct kbase_device;
+
+/**
+ * kbase_platform_funcs_conf - Specifies platform init/term function pointers
+ *
+ * Specifies the functions pointers for platform specific initialization and
+ * termination. By default no functions are required. No additional platform
+ * specific control is necessary.
+ */
+struct kbase_platform_funcs_conf {
+ /**
+ * platform_init_func - platform specific init function pointer
+ * @kbdev - kbase_device pointer
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Function pointer for platform specific initialization or NULL if no
+ * initialization function is required. At this point the GPU is not
+ * active and its power and clocks are in an unknown (platform specific)
+ * state, as kbase doesn't yet have control of power and clocks.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed (and possibly initialized) in here.
+ */
+ int (*platform_init_func)(struct kbase_device *kbdev);
+ /**
+ * platform_term_func - platform specific termination function pointer
+ * @kbdev - kbase_device pointer
+ *
+ * Function pointer for platform specific termination or NULL if no
+ * termination function is required. At this point the GPU will be
+ * idle but still powered and clocked.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed (and possibly terminated) in here.
+ */
+ void (*platform_term_func)(struct kbase_device *kbdev);
+};
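+
+/*
+ * Illustrative sketch (not part of the driver): a platform integration might
+ * define its init/term hooks and group them in a kbase_platform_funcs_conf.
+ * The function and variable names here are hypothetical.
+ *
+ *   static int example_platform_init(struct kbase_device *kbdev)
+ *   {
+ *           // claim clocks/regulators, set kbdev->platform_context, ...
+ *           return 0;
+ *   }
+ *
+ *   static void example_platform_term(struct kbase_device *kbdev)
+ *   {
+ *           // undo whatever example_platform_init() set up
+ *   }
+ *
+ *   struct kbase_platform_funcs_conf platform_funcs = {
+ *           .platform_init_func = example_platform_init,
+ *           .platform_term_func = example_platform_term,
+ *   };
+ */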
+
+/*
+ * @brief Specifies the callbacks for power management
+ *
+ * By default no callbacks will be made and the GPU must not be powered off.
+ */
+struct kbase_pm_callback_conf {
+ /** Callback for when the GPU is idle and the power to it can be switched off.
+ *
+ * The system integrator can decide whether to either do nothing, just switch off
+ * the clocks to the GPU, or to completely power down the GPU.
+ * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+ * platform \em callback's responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_off_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the GPU is about to become active and power must be supplied.
+ *
+ * This function must not return until the GPU is powered and clocked sufficiently for register access to
+ * succeed. The return value specifies whether the GPU was powered down since the call to power_off_callback.
+ * If the GPU state has been lost then this function must return 1, otherwise it should return 0.
+ * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+ * platform \em callback's responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+ *
+ * The return value of the first call to this function is ignored.
+ *
+ * @return 1 if the GPU state may have been lost, 0 otherwise.
+ */
+ int (*power_on_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the system is requesting a suspend and GPU power
+ * must be switched off.
+ *
+ * Note that if this callback is present, then this may be called
+ * without a preceding call to power_off_callback. Therefore this
+ * callback must be able to take any action that might otherwise happen
+ * in power_off_callback.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed and modified in here. It is the platform \em
+ * callback's responsibility to initialize and terminate this pointer if
+ * used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_suspend_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the system is resuming from a suspend and GPU
+ * power must be switched on.
+ *
+ * Note that if this callback is present, then this may be called
+ * without a following call to power_on_callback. Therefore this
+ * callback must be able to take any action that might otherwise happen
+ * in power_on_callback.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed and modified in here. It is the platform \em
+ * callback's responsibility to initialize and terminate this pointer if
+ * used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_resume_callback)(struct kbase_device *kbdev);
+
+ /** Callback for handling runtime power management initialization.
+ *
+ * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+ * will become active from calls made to the OS from within this function.
+ * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ *
+ * @return 0 on success, else int error code.
+ */
+ int (*power_runtime_init_callback)(struct kbase_device *kbdev);
+
+ /** Callback for handling runtime power management termination.
+ *
+ * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+ * should no longer be called by the OS on completion of this function.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ */
+ void (*power_runtime_term_callback)(struct kbase_device *kbdev);
+
+ /** Callback for runtime power-off power management callback
+ *
+ * For linux this callback will be called by the kernel runtime_suspend callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ */
+ void (*power_runtime_off_callback)(struct kbase_device *kbdev);
+
+ /** Callback for runtime power-on power management callback
+ *
+ * For linux this callback will be called by the kernel runtime_resume callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ */
+ int (*power_runtime_on_callback)(struct kbase_device *kbdev);
+
+ /*
+ * Optional callback for checking if GPU can be suspended when idle
+ *
+ * This callback will be called by the runtime power management core
+ * when the reference count goes to 0 to provide notification that the
+ * GPU now seems idle.
+ *
+ * If this callback finds that the GPU can't be powered off, or handles
+ * suspend by powering off directly or queueing up a power off, a
+ * non-zero value must be returned to prevent the runtime PM core from
+ * also triggering a suspend.
+ *
+ * Returning 0 will cause the runtime PM core to conduct a regular
+ * autosuspend.
+ *
+ * This callback is optional and if not provided regular autosuspend
+ * will be triggered.
+ *
+ * Note: The Linux kernel must have CONFIG_PM_RUNTIME enabled to use
+ * this feature.
+ *
+ * Return 0 if the GPU can be suspended, a positive value if it cannot be
+ * suspended by runtime PM, else an OS error code.
+ */
+ int (*power_runtime_idle_callback)(struct kbase_device *kbdev);
+};
+
+/**
+ * kbase_cpuprops_get_default_clock_speed - default for CPU_SPEED_FUNC
+ * @clock_speed - see kbase_cpu_clk_speed_func for details on the parameters
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Default implementation of CPU_SPEED_FUNC. This function sets clock_speed
+ * to 100, so will be an underestimate for any real system.
+ */
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
+
+/**
+ * kbase_cpu_clk_speed_func - Type of the function pointer for CPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current CPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ *
+ * This is mainly used to implement OpenCL's clGetDeviceInfo().
+ */
+typedef int (*kbase_cpu_clk_speed_func) (u32 *clock_speed);
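+
+/*
+ * Illustrative sketch (not part of the driver): a platform-supplied
+ * CPU_SPEED_FUNC must match kbase_cpu_clk_speed_func; the default
+ * kbase_cpuprops_get_default_clock_speed() simply reports a fixed 100 MHz in
+ * the same way. The 1800 MHz value below is made up.
+ *
+ *   static int example_cpu_clk_speed(u32 *clock_speed)
+ *   {
+ *           *clock_speed = 1800;
+ *           return 0;
+ *   }
+ */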
+
+/**
+ * kbase_gpu_clk_speed_func - Type of the function pointer for GPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current GPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ * When an error is returned, the caller assumes the maximum GPU speed stored
+ * in gpu_freq_khz_max.
+ *
+ * If the system timer is not available then this function is required
+ * for the OpenCL queue profiling to return correct timing information.
+ *
+ */
+typedef int (*kbase_gpu_clk_speed_func) (u32 *clock_speed);
+
+#ifdef CONFIG_OF
+struct kbase_platform_config {
+};
+#else
+
+/*
+ * @brief Specifies start and end of I/O memory region.
+ */
+struct kbase_io_memory_region {
+ u64 start;
+ u64 end;
+};
+
+/*
+ * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
+ */
+struct kbase_io_resources {
+ u32 job_irq_number;
+ u32 mmu_irq_number;
+ u32 gpu_irq_number;
+ struct kbase_io_memory_region io_memory_region;
+};
+
+struct kbase_platform_config {
+ const struct kbase_io_resources *io_resources;
+};
+
+#endif /* CONFIG_OF */
+
+/**
+ * @brief Gets the pointer to platform config.
+ *
+ * @return Pointer to the platform config
+ */
+struct kbase_platform_config *kbase_get_platform_config(void);
+
+/**
+ * kbasep_platform_device_init: - Platform specific call to initialize hardware
+ * @kbdev: kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can initialize any hardware and context state that
+ * is required for the GPU block to function.
+ *
+ * Return: 0 if no errors have been found in the config.
+ * Negative error code otherwise.
+ */
+int kbasep_platform_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_platform_device_term - Platform specific call to terminate hardware
+ * @kbdev: Kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can destroy any platform specific context state and
+ * shut down any hardware functionality that is outside of the Power Management
+ * callbacks.
+ *
+ */
+void kbasep_platform_device_term(struct kbase_device *kbdev);
+
+
+/**
+ * kbase_platform_early_init - Early initialisation of the platform code
+ *
+ * This function will be called when the module is loaded to perform any
+ * early initialisation required by the platform code, such as reading
+ * platform-specific device tree entries for the GPU.
+ *
+ * Return: 0 for success, any other fail causes module initialisation to fail
+ */
+int kbase_platform_early_init(void);
+
+#ifndef CONFIG_OF
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+/**
+ * kbase_platform_fake_register - Register a platform device for the GPU
+ *
+ * This can be used to register a platform device on systems where device tree
+ * is not enabled and the platform initialisation code in the kernel doesn't
+ * create the GPU device. Where possible device tree should be used instead.
+ *
+ * Return: 0 for success, any other fail causes module initialisation to fail
+ */
+int kbase_platform_fake_register(void);
+
+/**
+ * kbase_platform_fake_unregister - Unregister a fake platform device
+ *
+ * Unregister the platform device created with kbase_platform_fake_register()
+ */
+void kbase_platform_fake_unregister(void);
+#endif
+#endif
+
+ /** @} *//* end group kbase_config */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_CONFIG_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_config_defaults.h b/drivers/gpu/arm_gpu/mali_kbase_config_defaults.h
new file mode 100644
index 000000000000..69079e7d9680
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_config_defaults.h
@@ -0,0 +1,226 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_config_defaults.h
+ *
+ * Default values for configuration settings
+ *
+ */
+
+#ifndef _KBASE_CONFIG_DEFAULTS_H_
+#define _KBASE_CONFIG_DEFAULTS_H_
+
+/* Include mandatory definitions per platform */
+#include <mali_kbase_config_platform.h>
+
+/**
+* Boolean indicating whether the driver is configured to be secure at
+* the cost of a potential loss of performance.
+*
+* This currently affects only r0p0-15dev0 HW and earlier.
+*
+* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
+* performance:
+*
+* - When this is set to true, the driver remains fully secure,
+* but potentially loses performance compared with setting this to
+* false.
+* - When set to false, the driver is open to certain security
+* attacks.
+*
+* From r0p0-00rel0 and onwards, there is no security loss by setting
+* this to false, and no performance loss by setting it to
+* true.
+*/
+#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false
+
+enum {
+ /**
+ * Use unrestricted Address ID width on the AXI bus.
+ */
+ KBASE_AID_32 = 0x0,
+
+ /**
+	 * Restrict GPU to half of the maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_16 = 0x3,
+
+ /**
+	 * Restrict GPU to a quarter of the maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_8 = 0x2,
+
+ /**
+	 * Restrict GPU to an eighth of the maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_4 = 0x1
+};
+
+/**
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Attached value: u32 register value
+ * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ * KBASE_AID_16 - use 16 IDs (4 ID bits)
+ * KBASE_AID_8 - use 8 IDs (3 ID bits)
+ * KBASE_AID_4 - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_ARID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ *
+ * Attached value: u32 register value
+ * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ * KBASE_AID_16 - use 16 IDs (4 ID bits)
+ * KBASE_AID_8 - use 8 IDs (3 ID bits)
+ * KBASE_AID_4 - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_AWID_LIMIT KBASE_AID_32
+
+/**
+ * Default UMP device mapping. A UMP_DEVICE_<device>_SHIFT value which
+ * defines which UMP device this GPU should be mapped to.
+ */
+#define DEFAULT_UMP_GPU_DEVICE_SHIFT UMP_DEVICE_Z_SHIFT
+
+/*
+ * Default period for DVFS sampling
+ */
+#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
+
+/*
+ * Power Management poweroff tick granularity. This is in nanoseconds to
+ * allow HR timer support.
+ *
+ * On each scheduling tick, the power manager core may decide to:
+ * -# Power off one or more shader cores
+ * -# Power off the entire GPU
+ */
+#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */
+
+/*
+ * Power Manager number of ticks before shader cores are powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */
+
+/*
+ * Power Manager number of ticks before GPU is powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_GPU (2) /* 400-800us */
+
+/*
+ * Default scheduling tick granularity
+ */
+#define DEFAULT_JS_SCHEDULING_PERIOD_NS (100000000u) /* 100ms */
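+
+/*
+ * The tick counts below are expressed in units of this scheduling period,
+ * so a threshold of N ticks corresponds to roughly N * 100 ms of wall time
+ * (e.g. 50 ticks ~= 5 s), which is what the per-define comments indicate.
+ */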
+
+/*
+ * Default minimum number of scheduling ticks before jobs are soft-stopped.
+ *
+ * This defines the time-slice for a job (which may be different from that of a
+ * context).
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS (1) /* 100ms-200ms */
+
+/*
+ * Default minimum number of scheduling ticks before CL jobs are soft-stopped.
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS_CL (1) /* 100ms-200ms */
+
+/*
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS (50) /* 5s */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408 (300) /* 30s */
+
+/*
+ * Default minimum number of scheduling ticks before CL jobs are hard-stopped.
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_CL (50) /* 5s */
+
+/*
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ * during dumping
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING (15000) /* 1500s */
+
+/*
+ * Default timeout for some software jobs, after which the software event wait
+ * jobs will be cancelled.
+ */
+#define DEFAULT_JS_SOFT_JOB_TIMEOUT (3000) /* 3s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job
+ */
+#define DEFAULT_JS_RESET_TICKS_SS (55) /* 5.5s */
+#define DEFAULT_JS_RESET_TICKS_SS_8408 (450) /* 45s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" CL job.
+ */
+#define DEFAULT_JS_RESET_TICKS_CL (55) /* 5.5s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job during dumping.
+ */
+#define DEFAULT_JS_RESET_TICKS_DUMPING (15020) /* 1502s */
+
+/*
+ * Default number of milliseconds given for other jobs on the GPU to be
+ * soft-stopped when the GPU needs to be reset.
+ */
+#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */
+
+/*
+ * Default timeslice that a context is scheduled in for, in nanoseconds.
+ *
+ * When a context has used up this amount of time across its jobs, it is
+ * scheduled out to let another run.
+ *
+ * @note the resolution is nanoseconds (ns) here, because that's the format
+ * often used by the OS.
+ */
+#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */
+
+/*
+ * Perform GPU power down using only platform specific code, skipping DDK power
+ * management.
+ *
+ * If this is non-zero then kbase will avoid powering down shader cores, the
+ * tiler, and the L2 cache, instead just powering down the entire GPU through
+ * platform specific code. This may be required for certain platform
+ * integrations.
+ *
+ * Note that as this prevents kbase from powering down shader cores, this limits
+ * the available power policies to coarse_demand and always_on.
+ */
+#define PLATFORM_POWER_DOWN_ONLY (0)
+
+#endif /* _KBASE_CONFIG_DEFAULTS_H_ */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_context.c b/drivers/gpu/arm_gpu/mali_kbase_context.c
new file mode 100644
index 000000000000..ad20e6135ba5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_context.c
@@ -0,0 +1,362 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel context APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_dma_fence.h>
+#include <mali_kbase_ctx_sched.h>
+
+/**
+ * kbase_create_context() - Create a kernel base context.
+ * @kbdev: Kbase device
+ * @is_compat: Force creation of a 32-bit context
+ *
+ * Allocate and init a kernel base context.
+ *
+ * Return: new kbase context
+ */
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat)
+{
+ struct kbase_context *kctx;
+ int err;
+ struct page *p;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	/* zero-inited as a lot of code assumes it's zeroed out on create */
+ kctx = vzalloc(sizeof(*kctx));
+
+ if (!kctx)
+ goto out;
+
+ /* creating a context is considered a disjoint event */
+ kbase_disjoint_event(kbdev);
+
+ kctx->kbdev = kbdev;
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+ atomic_set(&kctx->refcount, 0);
+ if (is_compat)
+ kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kctx->timeline.owner_tgid = task_tgid_nr(current);
+#endif
+ atomic_set(&kctx->setup_complete, 0);
+ atomic_set(&kctx->setup_in_progress, 0);
+ spin_lock_init(&kctx->mm_update_lock);
+ kctx->process_mm = NULL;
+ atomic_set(&kctx->nonmapped_pages, 0);
+ kctx->slots_pullable = 0;
+ kctx->tgid = current->tgid;
+ kctx->pid = current->pid;
+
+ err = kbase_mem_pool_init(&kctx->mem_pool,
+ kbdev->mem_pool_max_size_default,
+ KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+ kctx->kbdev,
+ &kbdev->mem_pool);
+ if (err)
+ goto free_kctx;
+
+ err = kbase_mem_pool_init(&kctx->lp_mem_pool,
+ (kbdev->mem_pool_max_size_default >> 9),
+ KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER,
+ kctx->kbdev,
+ &kbdev->lp_mem_pool);
+ if (err)
+ goto free_mem_pool;
+
+ err = kbase_mem_evictable_init(kctx);
+ if (err)
+ goto free_both_pools;
+
+ atomic_set(&kctx->used_pages, 0);
+
+ err = kbase_jd_init(kctx);
+ if (err)
+ goto deinit_evictable;
+
+ err = kbasep_js_kctx_init(kctx);
+ if (err)
+ goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
+
+ err = kbase_event_init(kctx);
+ if (err)
+ goto free_jd;
+
+ atomic_set(&kctx->drain_pending, 0);
+
+ mutex_init(&kctx->reg_lock);
+
+ mutex_init(&kctx->mem_partials_lock);
+ INIT_LIST_HEAD(&kctx->mem_partials);
+
+ INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+ spin_lock_init(&kctx->waiting_soft_jobs_lock);
+#ifdef CONFIG_KDS
+ INIT_LIST_HEAD(&kctx->waiting_kds_resource);
+#endif
+ err = kbase_dma_fence_init(kctx);
+ if (err)
+ goto free_event;
+
+ err = kbase_mmu_init(kctx);
+ if (err)
+ goto term_dma_fence;
+
+ do {
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ if (err)
+ goto pgd_no_mem;
+
+ mutex_lock(&kctx->mmu_lock);
+ kctx->pgd = kbase_mmu_alloc_pgd(kctx);
+ mutex_unlock(&kctx->mmu_lock);
+ } while (!kctx->pgd);
+
+ p = kbase_mem_alloc_page(&kctx->mem_pool);
+ if (!p)
+ goto no_sink_page;
+ kctx->aliasing_sink_page = as_tagged(page_to_phys(p));
+
+ init_waitqueue_head(&kctx->event_queue);
+
+ kctx->cookies = KBASE_COOKIE_MASK;
+
+ /* Make sure page 0 is not used... */
+ err = kbase_region_tracker_init(kctx);
+ if (err)
+ goto no_region_tracker;
+
+ err = kbase_sticky_resource_init(kctx);
+ if (err)
+ goto no_sticky;
+
+ err = kbase_jit_init(kctx);
+ if (err)
+ goto no_jit;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_set(&kctx->jctx.work_id, 0);
+#endif
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
+#endif
+
+ kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
+
+ mutex_init(&kctx->vinstr_cli_lock);
+
+ setup_timer(&kctx->soft_job_timeout,
+ kbasep_soft_job_timeout_worker,
+ (uintptr_t)kctx);
+
+ return kctx;
+
+no_jit:
+ kbase_gpu_vm_lock(kctx);
+ kbase_sticky_resource_term(kctx);
+ kbase_gpu_vm_unlock(kctx);
+no_sticky:
+ kbase_region_tracker_term(kctx);
+no_region_tracker:
+ kbase_mem_pool_free(&kctx->mem_pool, p, false);
+no_sink_page:
+ /* VM lock needed for the call to kbase_mmu_free_pgd */
+ kbase_gpu_vm_lock(kctx);
+ kbase_mmu_free_pgd(kctx);
+ kbase_gpu_vm_unlock(kctx);
+pgd_no_mem:
+ kbase_mmu_term(kctx);
+term_dma_fence:
+ kbase_dma_fence_term(kctx);
+free_event:
+ kbase_event_cleanup(kctx);
+free_jd:
+	/* Safe to call this one even when it didn't initialize (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+ kbase_jd_exit(kctx);
+deinit_evictable:
+ kbase_mem_evictable_deinit(kctx);
+free_both_pools:
+ kbase_mem_pool_term(&kctx->lp_mem_pool);
+free_mem_pool:
+ kbase_mem_pool_term(&kctx->mem_pool);
+free_kctx:
+ vfree(kctx);
+out:
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context);
+
+static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
+{
+ dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+}
+
+/**
+ * kbase_destroy_context - Destroy a kernel base context.
+ * @kctx: Context to destroy
+ *
+ * Calls kbase_destroy_os_context() to free OS specific structures.
+ * Will release all outstanding regions.
+ */
+void kbase_destroy_context(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+ int pages;
+ unsigned long pending_regions_to_clean;
+ unsigned long flags;
+ struct page *p;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+ /* Ensure the core is powered up for the destroy process */
+ /* A suspend won't happen here, because we're in a syscall from a userspace
+ * thread. */
+ kbase_pm_context_active(kbdev);
+
+ kbase_jd_zap_context(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Removing the rest of the debugfs entries here as we want to keep the
+ * atom debugfs interface alive until all atoms have completed. This
+ * is useful for debugging hung contexts. */
+ debugfs_remove_recursive(kctx->kctx_dentry);
+#endif
+
+ kbase_event_cleanup(kctx);
+
+ /*
+ * JIT must be terminated before the code below as it must be called
+ * without the region lock being held.
+ * The code above ensures no new JIT allocations can be made by
+	 * the time we get to this point of context tear down.
+ */
+ kbase_jit_term(kctx);
+
+ kbase_gpu_vm_lock(kctx);
+
+ kbase_sticky_resource_term(kctx);
+
+ /* MMU is disabled as part of scheduling out the context */
+ kbase_mmu_free_pgd(kctx);
+
+ /* drop the aliasing sink page now that it can't be mapped anymore */
+ p = phys_to_page(as_phys_addr_t(kctx->aliasing_sink_page));
+ kbase_mem_pool_free(&kctx->mem_pool, p, false);
+
+ /* free pending region setups */
+ pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
+ while (pending_regions_to_clean) {
+ unsigned int cookie = __ffs(pending_regions_to_clean);
+
+ BUG_ON(!kctx->pending_regions[cookie]);
+
+ kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
+
+ kctx->pending_regions[cookie] = NULL;
+ pending_regions_to_clean &= ~(1UL << cookie);
+ }
+
+ kbase_region_tracker_term(kctx);
+ kbase_gpu_vm_unlock(kctx);
+
+	/* Safe to call this one even when it didn't initialize (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+
+ kbase_jd_exit(kctx);
+
+ kbase_pm_context_idle(kbdev);
+
+ kbase_dma_fence_term(kctx);
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_remove_ctx(kctx);
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_term(kctx);
+
+ pages = atomic_read(&kctx->used_pages);
+ if (pages != 0)
+ dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+ kbase_mem_evictable_deinit(kctx);
+ kbase_mem_pool_term(&kctx->mem_pool);
+ kbase_mem_pool_term(&kctx->lp_mem_pool);
+ WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
+ vfree(kctx);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context);
+
+/**
+ * kbase_context_set_create_flags - Set creation flags on a context
+ * @kctx: Kbase context
+ * @flags: Flags to set
+ *
+ * Return: 0 on success
+ */
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
+{
+ int err = 0;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long irq_flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* Validate flags */
+ if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+ /* Translate the flags */
+ if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+ /* Latch the initial attributes into the Job Scheduler */
+ kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);
+
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ out:
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
diff --git a/drivers/gpu/arm_gpu/mali_kbase_context.h b/drivers/gpu/arm_gpu/mali_kbase_context.h
new file mode 100644
index 000000000000..a3f5bb0ce0da
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_context.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_CONTEXT_H_
+#define _KBASE_CONTEXT_H_
+
+#include <linux/atomic.h>
+
+
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
+
+/**
+ * kbase_ctx_flag - Check if @flag is set on @kctx
+ * @kctx: Pointer to kbase context to check
+ * @flag: Flag to check
+ *
+ * Return: true if @flag is set on @kctx, false if not.
+ */
+static inline bool kbase_ctx_flag(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+ return atomic_read(&kctx->flags) & flag;
+}
+
+/**
+ * kbase_ctx_flag_clear - Clear @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to clear
+ *
+ * Clear the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+ /*
+	 * Earlier kernel versions don't have atomic_andnot() or
+	 * atomic_and(), and atomic_clear_mask() was only available on some
+	 * architectures and was removed from arm and arm64 in v3.13.
+	 *
+	 * Use a compare-exchange loop to clear the flag on pre-4.3 kernels,
+	 * where atomic_andnot() is not yet available.
+ */
+ int old, new;
+
+ do {
+ old = atomic_read(&kctx->flags);
+ new = old & ~flag;
+
+ } while (atomic_cmpxchg(&kctx->flags, old, new) != old);
+#else
+ atomic_andnot(flag, &kctx->flags);
+#endif
+}
+
+/**
+ * kbase_ctx_flag_set - Set @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to set
+ *
+ * Set the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+ atomic_or(flag, &kctx->flags);
+}
+#endif /* _KBASE_CONTEXT_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_core_linux.c b/drivers/gpu/arm_gpu/mali_kbase_core_linux.c
new file mode 100644
index 000000000000..a2b1030f1856
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_core_linux.c
@@ -0,0 +1,4875 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_uku.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
+#include <mali_kbase_mem_linux.h>
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include <backend/gpu/mali_kbase_devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <ipa/mali_kbase_ipa_debugfs.h>
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+#ifdef CONFIG_MALI_NO_MALI
+#include "mali_kbase_model_linux.h"
+#endif /* CONFIG_MALI_NO_MALI */
+#include "mali_kbase_mem_profile_debugfs_buf_size.h"
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_mem_pool_debugfs.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_regs_dump_debugfs.h"
+#endif /* !MALI_CUSTOMER_RELEASE */
+#include "mali_kbase_regs_history_debugfs.h"
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_ioctl.h"
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#include <linux/syscalls.h>
+#endif /* CONFIG_KDS */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/compat.h> /* is_compat_task */
+#include <linux/mman.h>
+#include <linux/version.h>
+#include <mali_kbase_hw.h>
+#include <platform/mali_kbase_platform_common.h>
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+#include <platform/mali_kbase_platform_fake.h>
+#endif /*CONFIG_MALI_PLATFORM_FAKE */
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <mali_kbase_config.h>
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif
+
+#include <mali_kbase_tlstream.h>
+
+#include <mali_kbase_as_fault_debugfs.h>
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG 0
+#define MMU_IRQ_TAG 1
+#define GPU_IRQ_TAG 2
+
+#if MALI_UNIT_TEST
+static struct kbase_exported_test_data shared_kernel_test_data;
+EXPORT_SYMBOL(shared_kernel_test_data);
+#endif /* MALI_UNIT_TEST */
+
+static int kbase_dev_nr;
+
+static DEFINE_MUTEX(kbase_dev_list_lock);
+static LIST_HEAD(kbase_dev_list);
+
+#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
+static inline void __compile_time_asserts(void)
+{
+ CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
+}
+
+static int kbase_api_handshake(struct kbase_context *kctx,
+ struct kbase_ioctl_version_check *version)
+{
+ switch (version->major) {
+ case BASE_UK_VERSION_MAJOR:
+ /* set minor to be the lowest common */
+ version->minor = min_t(int, BASE_UK_VERSION_MINOR,
+ (int)version->minor);
+ break;
+ default:
+		/* We return our actual version regardless of whether it
+		 * matches the version proposed by userspace -
+		 * userspace can bail out if it can't handle this
+		 * version */
+ version->major = BASE_UK_VERSION_MAJOR;
+ version->minor = BASE_UK_VERSION_MINOR;
+ break;
+ }
+
+ /* save the proposed version number for later use */
+ kctx->api_version = KBASE_API_VERSION(version->major, version->minor);
+
+ return 0;
+}
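+
+/*
+ * In the handshake above: if userspace proposes the kernel's own major
+ * version, the minor is clamped to the lower of the two values; for any
+ * other major the kernel reports its own major/minor and leaves it to
+ * userspace to decide whether it can cope with that version.
+ */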
+
+/**
+ * enum mali_error - Mali error codes shared with userspace
+ *
+ * This is a subset of the common Mali errors that can be returned to userspace.
+ * Values of matching user and kernel space enumerators MUST be the same.
+ * MALI_ERROR_NONE is guaranteed to be 0.
+ *
+ * @MALI_ERROR_NONE: Success
+ * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver
+ * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure
+ * @MALI_ERROR_FUNCTION_FAILED: Generic error code
+ */
+enum mali_error {
+ MALI_ERROR_NONE = 0,
+ MALI_ERROR_OUT_OF_GPU_MEMORY,
+ MALI_ERROR_OUT_OF_MEMORY,
+ MALI_ERROR_FUNCTION_FAILED,
+};
+
+enum {
+ inited_mem = (1u << 0),
+ inited_js = (1u << 1),
+ inited_pm_runtime_init = (1u << 2),
+#ifdef CONFIG_MALI_DEVFREQ
+ inited_devfreq = (1u << 3),
+#endif /* CONFIG_MALI_DEVFREQ */
+ inited_tlstream = (1u << 4),
+ inited_backend_early = (1u << 5),
+ inited_backend_late = (1u << 6),
+ inited_device = (1u << 7),
+ inited_vinstr = (1u << 8),
+
+ inited_job_fault = (1u << 10),
+ inited_sysfs_group = (1u << 11),
+ inited_misc_register = (1u << 12),
+ inited_get_device = (1u << 13),
+ inited_dev_list = (1u << 14),
+ inited_debugfs = (1u << 15),
+ inited_gpu_device = (1u << 16),
+ inited_registers_map = (1u << 17),
+ inited_io_history = (1u << 18),
+ inited_power_control = (1u << 19),
+ inited_buslogger = (1u << 20),
+ inited_protected = (1u << 21),
+ inited_ctx_sched = (1u << 22)
+};
+
+
+#ifdef CONFIG_MALI_DEBUG
+#define INACTIVE_WAIT_MS (5000)
+
+void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
+{
+ kbdev->driver_inactive = inactive;
+ wake_up(&kbdev->driver_inactive_wait);
+
+ /* Wait for any running IOCTLs to complete */
+ if (inactive)
+ msleep(INACTIVE_WAIT_MS);
+}
+KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
+#endif /* CONFIG_MALI_DEBUG */
+
+/**
+ * kbase_legacy_dispatch - UKK dispatch function
+ *
+ * This is the dispatch function for the legacy UKK ioctl interface. No new
+ * ioctls should be added to this function, see kbase_ioctl instead.
+ *
+ * @kctx: The kernel context structure
+ * @args: Pointer to the data structure passed from/to user space
+ * @args_size: Size of the data structure
+ */
+static int kbase_legacy_dispatch(struct kbase_context *kctx,
+ void * const args, u32 args_size)
+{
+ struct kbase_device *kbdev;
+ union uk_header *ukh = args;
+ u32 id;
+ int ret = 0;
+
+ KBASE_DEBUG_ASSERT(ukh != NULL);
+
+ kbdev = kctx->kbdev;
+ id = ukh->id;
+ ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
+
+#ifdef CONFIG_MALI_DEBUG
+ wait_event(kbdev->driver_inactive_wait,
+ kbdev->driver_inactive == false);
+#endif /* CONFIG_MALI_DEBUG */
+
+ if (UKP_FUNC_ID_CHECK_VERSION == id) {
+ struct uku_version_check_args *version_check;
+ struct kbase_ioctl_version_check version;
+
+ if (args_size != sizeof(struct uku_version_check_args)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ return 0;
+ }
+ version_check = (struct uku_version_check_args *)args;
+ version.minor = version_check->minor;
+ version.major = version_check->major;
+
+ kbase_api_handshake(kctx, &version);
+
+ version_check->minor = version.minor;
+ version_check->major = version.major;
+ ukh->ret = MALI_ERROR_NONE;
+ return 0;
+ }
+
+ /* block calls until version handshake */
+ if (kctx->api_version == 0)
+ return -EINVAL;
+
+ if (!atomic_read(&kctx->setup_complete)) {
+ struct kbase_uk_set_flags *kbase_set_flags;
+
+		/* setup pending; try to signal that we'll do the setup,
+		 * but if setup was already in progress, fail this call
+ */
+ if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
+ return -EINVAL;
+
+		/* if this is an unexpected call, stay stuck in setup mode
+		 * (KBASE_FUNC_SET_FLAGS is the only call accepted at this point)
+ */
+ if (id != KBASE_FUNC_SET_FLAGS)
+ return -EINVAL;
+
+ kbase_set_flags = (struct kbase_uk_set_flags *)args;
+
+		/* if the args size doesn't match the expected call, stay in setup mode */
+ if (sizeof(*kbase_set_flags) != args_size)
+ goto bad_size;
+
+ /* if bad flags, will stay stuck in setup mode */
+ if (kbase_context_set_create_flags(kctx,
+ kbase_set_flags->create_flags) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ atomic_set(&kctx->setup_complete, 1);
+ return 0;
+ }
+
+ /* setup complete, perform normal operation */
+ switch (id) {
+ case KBASE_FUNC_MEM_JIT_INIT:
+ {
+ struct kbase_uk_mem_jit_init *jit_init = args;
+
+ if (sizeof(*jit_init) != args_size)
+ goto bad_size;
+
+ if (kbase_region_tracker_init_jit(kctx,
+ jit_init->va_pages))
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_MEM_ALLOC:
+ {
+ struct kbase_uk_mem_alloc *mem = args;
+ struct kbase_va_region *reg;
+
+ if (sizeof(*mem) != args_size)
+ goto bad_size;
+
+#if defined(CONFIG_64BIT)
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* force SAME_VA if a 64-bit client */
+ mem->flags |= BASE_MEM_SAME_VA;
+ }
+#endif
+
+ reg = kbase_mem_alloc(kctx, mem->va_pages,
+ mem->commit_pages, mem->extent,
+ &mem->flags, &mem->gpu_va);
+ mem->va_alignment = 0;
+
+ if (!reg)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_MEM_IMPORT: {
+ struct kbase_uk_mem_import *mem_import = args;
+ void __user *phandle;
+
+ if (sizeof(*mem_import) != args_size)
+ goto bad_size;
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ phandle = compat_ptr(mem_import->phandle);
+ else
+#endif
+ phandle = u64_to_user_ptr(mem_import->phandle);
+
+ if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_import(kctx,
+ (enum base_mem_import_type)
+ mem_import->type,
+ phandle,
+ 0,
+ &mem_import->gpu_va,
+ &mem_import->va_pages,
+ &mem_import->flags)) {
+ mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ }
+ break;
+ }
+ case KBASE_FUNC_MEM_ALIAS: {
+ struct kbase_uk_mem_alias *alias = args;
+ struct base_mem_aliasing_info __user *user_ai;
+ struct base_mem_aliasing_info *ai;
+
+ if (sizeof(*alias) != args_size)
+ goto bad_size;
+
+ if (alias->nents > 2048) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ if (!alias->nents) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ user_ai = compat_ptr(alias->ai);
+ else
+#endif
+ user_ai = u64_to_user_ptr(alias->ai);
+
+ ai = vmalloc(sizeof(*ai) * alias->nents);
+
+ if (!ai) {
+ ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
+ break;
+ }
+
+ if (copy_from_user(ai, user_ai,
+ sizeof(*ai) * alias->nents)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ goto copy_failed;
+ }
+
+ alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
+ alias->stride,
+ alias->nents, ai,
+ &alias->va_pages);
+ if (!alias->gpu_va) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ goto no_alias;
+ }
+no_alias:
+copy_failed:
+ vfree(ai);
+ break;
+ }
+ case KBASE_FUNC_MEM_COMMIT:
+ {
+ struct kbase_uk_mem_commit *commit = args;
+ int ret;
+
+ if (sizeof(*commit) != args_size)
+ goto bad_size;
+
+ ret = kbase_mem_commit(kctx, commit->gpu_addr,
+ commit->pages);
+
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ commit->result_subcode =
+ BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
+
+ if (ret == 0) {
+ ukh->ret = MALI_ERROR_NONE;
+ commit->result_subcode =
+ BASE_BACKING_THRESHOLD_OK;
+ } else if (ret == -ENOMEM) {
+ commit->result_subcode =
+ BASE_BACKING_THRESHOLD_ERROR_OOM;
+ }
+
+ break;
+ }
+
+ case KBASE_FUNC_MEM_QUERY:
+ {
+ struct kbase_uk_mem_query *query = args;
+
+ if (sizeof(*query) != args_size)
+ goto bad_size;
+
+ if (kbase_mem_query(kctx, query->gpu_addr,
+ query->query, &query->value) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ break;
+ }
+ break;
+
+ case KBASE_FUNC_MEM_FLAGS_CHANGE:
+ {
+ struct kbase_uk_mem_flags_change *fc = args;
+
+ if (sizeof(*fc) != args_size)
+ goto bad_size;
+
+ if (kbase_mem_flags_change(kctx, fc->gpu_va,
+ fc->flags, fc->mask) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ break;
+ }
+ case KBASE_FUNC_MEM_FREE:
+ {
+ struct kbase_uk_mem_free *mem = args;
+
+ if (sizeof(*mem) != args_size)
+ goto bad_size;
+
+ if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ case KBASE_FUNC_JOB_SUBMIT:
+ {
+ struct kbase_uk_job_submit *job = args;
+ char __user *user_buf;
+
+ if (sizeof(*job) != args_size)
+ goto bad_size;
+
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ user_buf = compat_ptr(job->addr);
+ else
+#endif
+ user_buf = u64_to_user_ptr(job->addr);
+
+ if (kbase_jd_submit(kctx, user_buf,
+ job->nr_atoms,
+ job->stride,
+ false) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ case KBASE_FUNC_SYNC:
+ {
+ struct kbase_uk_sync_now *sn = args;
+
+ if (sizeof(*sn) != args_size)
+ goto bad_size;
+
+ if (kbase_sync_now(kctx, &sn->sset.basep_sset) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ case KBASE_FUNC_DISJOINT_QUERY:
+ {
+ struct kbase_uk_disjoint_query *dquery = args;
+
+ if (sizeof(*dquery) != args_size)
+ goto bad_size;
+
+ /* Get the disjointness counter value. */
+ dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
+ break;
+ }
+
+ case KBASE_FUNC_POST_TERM:
+ {
+ kbase_event_close(kctx);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_SETUP:
+ {
+ struct kbase_uk_hwcnt_setup *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
+ &kctx->vinstr_cli, setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_DUMP:
+ {
+ /* args ignored */
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_CLEAR:
+ {
+ /* args ignored */
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_READER_SETUP:
+ {
+ struct kbase_uk_hwcnt_reader_setup *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
+ setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_GPU_PROPS_REG_DUMP:
+ {
+ struct kbase_uk_gpuprops *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_FIND_CPU_OFFSET:
+ {
+ struct kbase_uk_find_cpu_offset *find = args;
+
+ if (sizeof(*find) != args_size)
+ goto bad_size;
+
+ if (find->gpu_addr & ~PAGE_MASK) {
+ dev_warn(kbdev->dev, "kbase_legacy_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
+ goto out_bad;
+ }
+
+ if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ } else {
+ int err;
+
+ err = kbasep_find_enclosing_cpu_mapping_offset(
+ kctx,
+ find->cpu_addr,
+ find->size,
+ &find->offset);
+
+ if (err)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ }
+ break;
+ }
+ case KBASE_FUNC_GET_VERSION:
+ {
+ struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
+
+ if (sizeof(*get_version) != args_size)
+ goto bad_size;
+
+ /* version buffer size check is made in compile time assert */
+ memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+ get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+ break;
+ }
+
+ case KBASE_FUNC_STREAM_CREATE:
+ {
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
+
+ if (sizeof(*screate) != args_size)
+ goto bad_size;
+
+ if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
+ /* not NULL terminated */
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_sync_fence_stream_create(screate->name,
+ &screate->fd) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+#else /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+ break;
+ }
+ case KBASE_FUNC_FENCE_VALIDATE:
+ {
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
+
+ if (sizeof(*fence_validate) != args_size)
+ goto bad_size;
+
+ if (kbase_sync_fence_validate(fence_validate->fd) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+ break;
+ }
+
+ case KBASE_FUNC_SET_TEST_DATA:
+ {
+#if MALI_UNIT_TEST
+ struct kbase_uk_set_test_data *set_data = args;
+
+ shared_kernel_test_data = set_data->test_data;
+ shared_kernel_test_data.kctx = (uintptr_t)kctx;
+ shared_kernel_test_data.mm = (uintptr_t)current->mm;
+ ukh->ret = MALI_ERROR_NONE;
+#endif /* MALI_UNIT_TEST */
+ break;
+ }
+
+ case KBASE_FUNC_INJECT_ERROR:
+ {
+#ifdef CONFIG_MALI_ERROR_INJECT
+ unsigned long flags;
+ struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
+
+ /*mutex lock */
+ spin_lock_irqsave(&kbdev->reg_op_lock, flags);
+ if (job_atom_inject_error(&params) != 0)
+ ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
+ /*mutex unlock */
+#endif /* CONFIG_MALI_ERROR_INJECT */
+ break;
+ }
+
+ case KBASE_FUNC_MODEL_CONTROL:
+ {
+#ifdef CONFIG_MALI_NO_MALI
+ unsigned long flags;
+ struct kbase_model_control_params params =
+ ((struct kbase_uk_model_control_params *)args)->params;
+
+ /*mutex lock */
+ spin_lock_irqsave(&kbdev->reg_op_lock, flags);
+ if (gpu_model_control(kbdev->model, &params) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
+ /*mutex unlock */
+#endif /* CONFIG_MALI_NO_MALI */
+ break;
+ }
+
+ case KBASE_FUNC_GET_PROFILING_CONTROLS:
+ {
+ struct kbase_uk_profiling_controls *controls =
+ (struct kbase_uk_profiling_controls *)args;
+ u32 i;
+
+ if (sizeof(*controls) != args_size)
+ goto bad_size;
+
+ for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
+ controls->profiling_controls[i] =
+ kbdev->kbase_profiling_controls[i];
+
+ break;
+ }
+
+ /* used only for testing purposes; these controls are to be set by gator through gator API */
+ case KBASE_FUNC_SET_PROFILING_CONTROLS:
+ {
+ struct kbase_uk_profiling_controls *controls =
+ (struct kbase_uk_profiling_controls *)args;
+ u32 i;
+
+ if (sizeof(*controls) != args_size)
+ goto bad_size;
+
+ for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
+ _mali_profiling_control(i, controls->profiling_controls[i]);
+
+ break;
+ }
+
+ case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
+ {
+ struct kbase_uk_debugfs_mem_profile_add *add_data =
+ (struct kbase_uk_debugfs_mem_profile_add *)args;
+ char *buf;
+ char __user *user_buf;
+
+ if (sizeof(*add_data) != args_size)
+ goto bad_size;
+
+ if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+ dev_err(kbdev->dev, "buffer too big\n");
+ goto out_bad;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ user_buf = compat_ptr(add_data->buf);
+ else
+#endif
+ user_buf = u64_to_user_ptr(add_data->buf);
+
+ buf = kmalloc(add_data->len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf))
+ goto out_bad;
+
+ if (0 != copy_from_user(buf, user_buf, add_data->len)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ kfree(buf);
+ goto out_bad;
+ }
+
+ if (kbasep_mem_profile_debugfs_insert(kctx, buf,
+ add_data->len)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ goto out_bad;
+ }
+
+ break;
+ }
+
+#ifdef CONFIG_MALI_NO_MALI
+ case KBASE_FUNC_SET_PRFCNT_VALUES:
+ {
+
+ struct kbase_uk_prfcnt_values *params =
+ ((struct kbase_uk_prfcnt_values *)args);
+ gpu_model_set_dummy_prfcnt_sample(params->data,
+ params->size);
+
+ break;
+ }
+#endif /* CONFIG_MALI_NO_MALI */
+#ifdef BASE_LEGACY_UK10_4_SUPPORT
+ case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4:
+ {
+ struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire
+ = args;
+ int ret;
+
+ if (sizeof(*tlstream_acquire) != args_size)
+ goto bad_size;
+
+ ret = kbase_tlstream_acquire(
+ kctx, 0);
+ if (ret < 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ tlstream_acquire->fd = ret;
+ break;
+ }
+#endif /* BASE_LEGACY_UK10_4_SUPPORT */
+ case KBASE_FUNC_TLSTREAM_ACQUIRE:
+ {
+ struct kbase_uk_tlstream_acquire *tlstream_acquire =
+ args;
+ int ret;
+
+ if (sizeof(*tlstream_acquire) != args_size)
+ goto bad_size;
+
+ if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK)
+ goto out_bad;
+
+ ret = kbase_tlstream_acquire(
+ kctx, tlstream_acquire->flags);
+ if (ret < 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ tlstream_acquire->fd = ret;
+ break;
+ }
+ case KBASE_FUNC_TLSTREAM_FLUSH:
+ {
+ struct kbase_uk_tlstream_flush *tlstream_flush =
+ args;
+
+ if (sizeof(*tlstream_flush) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_flush_streams();
+ break;
+ }
+#if MALI_UNIT_TEST
+ case KBASE_FUNC_TLSTREAM_TEST:
+ {
+ struct kbase_uk_tlstream_test *tlstream_test = args;
+
+ if (sizeof(*tlstream_test) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_test(
+ tlstream_test->tpw_count,
+ tlstream_test->msg_delay,
+ tlstream_test->msg_count,
+ tlstream_test->aux_msg);
+ break;
+ }
+ case KBASE_FUNC_TLSTREAM_STATS:
+ {
+ struct kbase_uk_tlstream_stats *tlstream_stats = args;
+
+ if (sizeof(*tlstream_stats) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_stats(
+ &tlstream_stats->bytes_collected,
+ &tlstream_stats->bytes_generated);
+ break;
+ }
+#endif /* MALI_UNIT_TEST */
+
+ case KBASE_FUNC_GET_CONTEXT_ID:
+ {
+ struct kbase_uk_context_id *info = args;
+
+ info->id = kctx->id;
+ break;
+ }
+
+ case KBASE_FUNC_SOFT_EVENT_UPDATE:
+ {
+ struct kbase_uk_soft_event_update *update = args;
+
+ if (sizeof(*update) != args_size)
+ goto bad_size;
+
+ if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
+ (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
+ (update->flags != 0))
+ goto out_bad;
+
+ if (kbase_soft_event_update(kctx, update->evt,
+ update->new_status))
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ break;
+ }
+
+ default:
+ dev_err(kbdev->dev, "unknown ioctl %u\n", id);
+ goto out_bad;
+ }
+
+ return ret;
+
+ bad_size:
+ dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
+ out_bad:
+ return -EINVAL;
+}
+
+static struct kbase_device *to_kbase_device(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static int assign_irqs(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ int i;
+
+ if (!kbdev)
+ return -ENODEV;
+
+ /* 3 IRQ resources */
+ for (i = 0; i < 3; i++) {
+ struct resource *irq_res;
+ int irqtag;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res) {
+ dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
+ return -ENOENT;
+ }
+
+#ifdef CONFIG_OF
+ if (!strncmp(irq_res->name, "JOB", 4)) {
+ irqtag = JOB_IRQ_TAG;
+ } else if (!strncmp(irq_res->name, "MMU", 4)) {
+ irqtag = MMU_IRQ_TAG;
+ } else if (!strncmp(irq_res->name, "GPU", 4)) {
+ irqtag = GPU_IRQ_TAG;
+ } else {
+ dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
+ irq_res->name);
+ return -EINVAL;
+ }
+#else
+ irqtag = i;
+#endif /* CONFIG_OF */
+ kbdev->irqs[irqtag].irq = irq_res->start;
+ kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
+ }
+
+ return 0;
+}
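+
+/*
+ * Note on assign_irqs(): with CONFIG_OF the three interrupts are matched by
+ * resource name, so the GPU's device tree node is expected to provide
+ * interrupt names of "JOB", "MMU" and "GPU" (in any order); without
+ * CONFIG_OF they are taken positionally as resources 0, 1 and 2.
+ */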
+
+/*
+ * API to acquire device list mutex and
+ * return pointer to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void)
+{
+ mutex_lock(&kbase_dev_list_lock);
+ return &kbase_dev_list;
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_get);
+
+/* API to release the device list mutex */
+void kbase_dev_list_put(const struct list_head *dev_list)
+{
+ mutex_unlock(&kbase_dev_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_put);
+
+/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
+struct kbase_device *kbase_find_device(int minor)
+{
+ struct kbase_device *kbdev = NULL;
+ struct list_head *entry;
+ const struct list_head *dev_list = kbase_dev_list_get();
+
+ list_for_each(entry, dev_list) {
+ struct kbase_device *tmp;
+
+ tmp = list_entry(entry, struct kbase_device, entry);
+ if (tmp->mdev.minor == minor || minor == -1) {
+ kbdev = tmp;
+ get_device(kbdev->dev);
+ break;
+ }
+ }
+ kbase_dev_list_put(dev_list);
+
+ return kbdev;
+}
+EXPORT_SYMBOL(kbase_find_device);
+
+void kbase_release_device(struct kbase_device *kbdev)
+{
+ put_device(kbdev->dev);
+}
+EXPORT_SYMBOL(kbase_release_device);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && \
+ !(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 28) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+/*
+ * Kernel versions before v4.6 don't have kstrtobool_from_user(), except
+ * longterm 4.4.y, which had it added in 4.4.28
+ */
+static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+ char buf[32];
+
+ count = min(sizeof(buf), count);
+
+ if (copy_from_user(buf, s, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ return strtobool(buf, res);
+}
+#endif
+
+static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
+{
+ struct kbase_context *kctx = f->private_data;
+ int err;
+ bool value;
+
+ err = kstrtobool_from_user(ubuf, size, &value);
+ if (err)
+ return err;
+
+ if (value)
+ kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+ else
+ kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
+
+ return size;
+}
+
+static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
+{
+ struct kbase_context *kctx = f->private_data;
+ char buf[32];
+ int count;
+ bool value;
+
+ value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
+
+ count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+ return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_infinite_cache_fops = {
+ .open = simple_open,
+ .write = write_ctx_infinite_cache,
+ .read = read_ctx_infinite_cache,
+};
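+
+/*
+ * The debugfs file backed by these fops accepts the boolean spellings
+ * understood by strtobool() on write (e.g. "Y"/"N", "1"/"0") and reads back
+ * as "Y" or "N" depending on whether KCTX_INFINITE_CACHE is set for the
+ * context.
+ */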
+
+static int kbase_open(struct inode *inode, struct file *filp)
+{
+ struct kbase_device *kbdev = NULL;
+ struct kbase_context *kctx;
+ int ret = 0;
+#ifdef CONFIG_DEBUG_FS
+ char kctx_name[64];
+#endif
+
+ kbdev = kbase_find_device(iminor(inode));
+
+ if (!kbdev)
+ return -ENODEV;
+
+ kctx = kbase_create_context(kbdev, is_compat_task());
+ if (!kctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_waitqueue_head(&kctx->event_queue);
+ filp->private_data = kctx;
+ kctx->filp = filp;
+
+ if (kbdev->infinite_cache_active_default)
+ kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+
+#ifdef CONFIG_DEBUG_FS
+ snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
+
+ kctx->kctx_dentry = debugfs_create_dir(kctx_name,
+ kbdev->debugfs_ctx_directory);
+
+ if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
+ kctx, &kbase_infinite_cache_fops);
+
+ mutex_init(&kctx->mem_profile_lock);
+
+ kbasep_jd_debugfs_ctx_init(kctx);
+ kbase_debug_mem_view_init(filp);
+
+ kbase_debug_job_fault_context_init(kctx);
+
+ kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
+
+ kbase_jit_debugfs_init(kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+ dev_dbg(kbdev->dev, "created base context\n");
+
+ {
+ struct kbasep_kctx_list_element *element;
+
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ element->kctx = kctx;
+ list_add(&element->link, &kbdev->kctx_list);
+ KBASE_TLSTREAM_TL_NEW_CTX(
+ element->kctx,
+ (u32)(element->kctx->id),
+ (u32)(element->kctx->tgid));
+ mutex_unlock(&kbdev->kctx_list_lock);
+ } else {
+ /* we don't treat this as a fail - just warn about it */
+ dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
+ }
+ }
+ return 0;
+
+ out:
+ kbase_release_device(kbdev);
+ return ret;
+}
+
+static int kbase_release(struct inode *inode, struct file *filp)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_kctx_list_element *element, *tmp;
+ bool found_element = false;
+
+ KBASE_TLSTREAM_TL_DEL_CTX(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+ kbasep_mem_profile_debugfs_remove(kctx);
+ kbase_debug_job_fault_context_term(kctx);
+#endif
+
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
+ if (element->kctx == kctx) {
+ list_del(&element->link);
+ kfree(element);
+ found_element = true;
+ }
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+ if (!found_element)
+ dev_warn(kbdev->dev, "kctx not in kctx_list\n");
+
+ filp->private_data = NULL;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ /* If this client was performing hwcnt dumping and did not explicitly
+ * detach itself, remove it from the vinstr core now */
+ if (kctx->vinstr_cli) {
+ struct kbase_uk_hwcnt_setup setup;
+
+ setup.dump_buffer = 0llu;
+ kbase_vinstr_legacy_hwc_setup(
+ kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
+ }
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ kbase_destroy_context(kctx);
+
+ dev_dbg(kbdev->dev, "deleted base context\n");
+ kbase_release_device(kbdev);
+ return 0;
+}
+
+#define CALL_MAX_SIZE 536
+
+static long kbase_legacy_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
+ u32 size = _IOC_SIZE(cmd);
+ struct kbase_context *kctx = filp->private_data;
+
+ if (size > CALL_MAX_SIZE)
+ return -ENOTTY;
+
+ if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
+ dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
+ return -EFAULT;
+ }
+
+ if (kbase_legacy_dispatch(kctx, &msg, size) != 0)
+ return -EFAULT;
+
+ if (0 != copy_to_user((void __user *)arg, &msg, size)) {
+ dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int kbase_api_set_flags(struct kbase_context *kctx,
+ struct kbase_ioctl_set_flags *flags)
+{
+ int err;
+
+	/* setup pending; try to signal that we'll do the setup,
+	 * but if setup was already in progress, fail this call
+ */
+ if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
+ return -EINVAL;
+
+ err = kbase_context_set_create_flags(kctx, flags->create_flags);
+ /* if bad flags, will stay stuck in setup mode */
+ if (err)
+ return err;
+
+ atomic_set(&kctx->setup_complete, 1);
+ return 0;
+}
+
+static int kbase_api_job_submit(struct kbase_context *kctx,
+ struct kbase_ioctl_job_submit *submit)
+{
+ return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
+ submit->nr_atoms,
+ submit->stride, false);
+}
+
+static int kbase_api_get_gpuprops(struct kbase_context *kctx,
+ struct kbase_ioctl_get_gpuprops *get_props)
+{
+ struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props;
+ int err;
+
+ if (get_props->flags != 0) {
+ dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops");
+ return -EINVAL;
+ }
+
+ if (get_props->size == 0)
+ return kprops->prop_buffer_size;
+ if (get_props->size < kprops->prop_buffer_size)
+ return -EINVAL;
+
+ err = copy_to_user(u64_to_user_ptr(get_props->buffer),
+ kprops->prop_buffer,
+ kprops->prop_buffer_size);
+ if (err)
+ return -EFAULT;
+ return kprops->prop_buffer_size;
+}
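+
+/*
+ * Calling convention implied above: userspace may first call with size == 0
+ * to learn the required buffer size, then call again with a buffer of at
+ * least that size; the properties blob is then copied out and its size
+ * returned.
+ */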
+
+static int kbase_api_post_term(struct kbase_context *kctx)
+{
+ kbase_event_close(kctx);
+ return 0;
+}
+
+static int kbase_api_mem_alloc(struct kbase_context *kctx,
+ union kbase_ioctl_mem_alloc *alloc)
+{
+ struct kbase_va_region *reg;
+ u64 flags = alloc->in.flags;
+ u64 gpu_va;
+
+#if defined(CONFIG_64BIT)
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* force SAME_VA if a 64-bit client */
+ flags |= BASE_MEM_SAME_VA;
+ }
+#endif
+
+ reg = kbase_mem_alloc(kctx, alloc->in.va_pages,
+ alloc->in.commit_pages,
+ alloc->in.extent,
+ &flags, &gpu_va);
+
+ if (!reg)
+ return -ENOMEM;
+
+ alloc->out.flags = flags;
+ alloc->out.gpu_va = gpu_va;
+
+ return 0;
+}
+
+static int kbase_api_mem_query(struct kbase_context *kctx,
+ union kbase_ioctl_mem_query *query)
+{
+ return kbase_mem_query(kctx, query->in.gpu_addr,
+ query->in.query, &query->out.value);
+}
+
+static int kbase_api_mem_free(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_free *free)
+{
+ return kbase_mem_free(kctx, free->gpu_addr);
+}
+
+static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
+ struct kbase_ioctl_hwcnt_reader_setup *setup)
+{
+ int ret;
+ struct kbase_uk_hwcnt_reader_setup args = {
+ .buffer_count = setup->buffer_count,
+ .jm_bm = setup->jm_bm,
+ .shader_bm = setup->shader_bm,
+ .tiler_bm = setup->tiler_bm,
+ .mmu_l2_bm = setup->mmu_l2_bm
+ };
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ ret = kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, &args);
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ if (ret)
+ return ret;
+ return args.fd;
+}
+
+static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
+ struct kbase_ioctl_hwcnt_enable *enable)
+{
+ int ret;
+ struct kbase_uk_hwcnt_setup args = {
+ .dump_buffer = enable->dump_buffer,
+ .jm_bm = enable->jm_bm,
+ .shader_bm = enable->shader_bm,
+ .tiler_bm = enable->tiler_bm,
+ .mmu_l2_bm = enable->mmu_l2_bm
+ };
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ ret = kbase_vinstr_legacy_hwc_setup(kctx->kbdev->vinstr_ctx,
+ &kctx->vinstr_cli, &args);
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ return ret;
+}
+
+static int kbase_api_hwcnt_dump(struct kbase_context *kctx)
+{
+ int ret;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ ret = kbase_vinstr_hwc_dump(kctx->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL);
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ return ret;
+}
+
+static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
+{
+ int ret;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ ret = kbase_vinstr_hwc_clear(kctx->vinstr_cli);
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ return ret;
+}
+
+static int kbase_api_disjoint_query(struct kbase_context *kctx,
+ struct kbase_ioctl_disjoint_query *query)
+{
+ query->counter = kbase_disjoint_event_get(kctx->kbdev);
+
+ return 0;
+}
+
+static int kbase_api_get_ddk_version(struct kbase_context *kctx,
+ struct kbase_ioctl_get_ddk_version *version)
+{
+ int ret;
+ int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+
+ if (version->version_buffer == 0)
+ return len;
+
+ if (version->size < len)
+ return -EOVERFLOW;
+
+ ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
+ KERNEL_SIDE_DDK_VERSION_STRING,
+ sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+
+ if (ret)
+ return -EFAULT;
+
+ return len;
+}
+
+static int kbase_api_mem_jit_init(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_jit_init *jit_init)
+{
+ return kbase_region_tracker_init_jit(kctx, jit_init->va_pages);
+}
+
+static int kbase_api_mem_sync(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_sync *sync)
+{
+ struct basep_syncset sset = {
+ .mem_handle.basep.handle = sync->handle,
+ .user_addr = sync->user_addr,
+ .size = sync->size,
+ .type = sync->type
+ };
+
+ return kbase_sync_now(kctx, &sset);
+}
+
+static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
+ union kbase_ioctl_mem_find_cpu_offset *find)
+{
+ return kbasep_find_enclosing_cpu_mapping_offset(
+ kctx,
+ find->in.cpu_addr,
+ find->in.size,
+ &find->out.offset);
+}
+
+static int kbase_api_get_context_id(struct kbase_context *kctx,
+ struct kbase_ioctl_get_context_id *info)
+{
+ info->id = kctx->id;
+
+ return 0;
+}
+
+static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
+ struct kbase_ioctl_tlstream_acquire *acquire)
+{
+ return kbase_tlstream_acquire(kctx, acquire->flags);
+}
+
+static int kbase_api_tlstream_flush(struct kbase_context *kctx)
+{
+ kbase_tlstream_flush_streams();
+
+ return 0;
+}
+
+static int kbase_api_mem_commit(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_commit *commit)
+{
+ return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
+}
+
+static int kbase_api_mem_alias(struct kbase_context *kctx,
+ union kbase_ioctl_mem_alias *alias)
+{
+ struct base_mem_aliasing_info *ai;
+ u64 flags;
+ int err;
+
+ if (alias->in.nents == 0 || alias->in.nents > 2048)
+ return -EINVAL;
+
+ ai = vmalloc(sizeof(*ai) * alias->in.nents);
+ if (!ai)
+ return -ENOMEM;
+
+ err = copy_from_user(ai,
+ u64_to_user_ptr(alias->in.aliasing_info),
+ sizeof(*ai) * alias->in.nents);
+ if (err) {
+ vfree(ai);
+ return -EFAULT;
+ }
+
+ flags = alias->in.flags;
+
+ alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
+ alias->in.stride, alias->in.nents,
+ ai, &alias->out.va_pages);
+
+ alias->out.flags = flags;
+
+ vfree(ai);
+
+ if (alias->out.gpu_va == 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int kbase_api_mem_import(struct kbase_context *kctx,
+ union kbase_ioctl_mem_import *import)
+{
+ int ret;
+ u64 flags = import->in.flags;
+
+ ret = kbase_mem_import(kctx,
+ import->in.type,
+ u64_to_user_ptr(import->in.phandle),
+ import->in.padding,
+ &import->out.gpu_va,
+ &import->out.va_pages,
+ &flags);
+
+ import->out.flags = flags;
+
+ return ret;
+}
+
+static int kbase_api_mem_flags_change(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_flags_change *change)
+{
+ return kbase_mem_flags_change(kctx, change->gpu_va,
+ change->flags, change->mask);
+}
+
+static int kbase_api_stream_create(struct kbase_context *kctx,
+ struct kbase_ioctl_stream_create *stream)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ int fd, ret;
+
+ /* Name must be NULL-terminated and padded with NULLs, so check last
+ * character is NULL
+ */
+ if (stream->name[sizeof(stream->name)-1] != 0)
+ return -EINVAL;
+
+ ret = kbase_sync_fence_stream_create(stream->name, &fd);
+
+ if (ret)
+ return ret;
+ return fd;
+#else
+ return -ENOENT;
+#endif
+}
+
+static int kbase_api_fence_validate(struct kbase_context *kctx,
+ struct kbase_ioctl_fence_validate *validate)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ return kbase_sync_fence_validate(validate->fd);
+#else
+ return -ENOENT;
+#endif
+}
+
+static int kbase_api_get_profiling_controls(struct kbase_context *kctx,
+ struct kbase_ioctl_get_profiling_controls *controls)
+{
+ int ret;
+
+ if (controls->count > (FBDUMP_CONTROL_MAX - FBDUMP_CONTROL_MIN))
+ return -EINVAL;
+
+ ret = copy_to_user(u64_to_user_ptr(controls->buffer),
+ &kctx->kbdev->kbase_profiling_controls[
+ FBDUMP_CONTROL_MIN],
+ controls->count * sizeof(u32));
+
+ if (ret)
+ return -EFAULT;
+ return 0;
+}
+
+static int kbase_api_mem_profile_add(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_profile_add *data)
+{
+ char *buf;
+ int err;
+
+ if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+ dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n");
+ return -EINVAL;
+ }
+
+ buf = kmalloc(data->len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf))
+ return -ENOMEM;
+
+ err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
+ data->len);
+ if (err) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
+}
+
+static int kbase_api_soft_event_update(struct kbase_context *kctx,
+ struct kbase_ioctl_soft_event_update *update)
+{
+ if (update->flags != 0)
+ return -EINVAL;
+
+ return kbase_soft_event_update(kctx, update->event, update->new_status);
+}
+
+#if MALI_UNIT_TEST
+static int kbase_api_tlstream_test(struct kbase_context *kctx,
+ struct kbase_ioctl_tlstream_test *test)
+{
+ kbase_tlstream_test(
+ test->tpw_count,
+ test->msg_delay,
+ test->msg_count,
+ test->aux_msg);
+
+ return 0;
+}
+
+static int kbase_api_tlstream_stats(struct kbase_context *kctx,
+ struct kbase_ioctl_tlstream_stats *stats)
+{
+ kbase_tlstream_stats(
+ &stats->bytes_collected,
+ &stats->bytes_generated);
+
+ return 0;
+}
+#endif /* MALI_UNIT_TEST */
+
+#define KBASE_HANDLE_IOCTL(cmd, function) \
+ case cmd: \
+ do { \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
+ return function(kctx); \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return function(kctx, &param); \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int ret, err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ ret = function(kctx, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return ret; \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int ret, err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ ret = function(kctx, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return ret; \
+ } while (0)
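+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical write-only
+ * ioctl KBASE_IOCTL_FOO carrying a struct kbase_ioctl_foo would be wired
+ * into the switch statements below as
+ *
+ *   KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FOO,
+ *           kbase_api_foo,
+ *           struct kbase_ioctl_foo);
+ *
+ * Each helper checks at build time that the direction and size encoded in
+ * the ioctl number match the handler's argument type, copies the argument in
+ * from user space for the _IN and _INOUT variants, calls the handler, and
+ * copies the argument back to user space for the _OUT and _INOUT variants.
+ */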
+
+static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct kbase_device *kbdev = kctx->kbdev;
+ void __user *uarg = (void __user *)arg;
+
+ /* The UK ioctl values overflow the cmd field causing the type to be
+ * incremented
+ */
+ if (_IOC_TYPE(cmd) == LINUX_UK_BASE_MAGIC+2)
+ return kbase_legacy_ioctl(filp, cmd, arg);
+
+ /* The UK version check IOCTL doesn't overflow the cmd field, so is
+ * handled separately here
+ */
+ if (cmd == _IOC(_IOC_READ|_IOC_WRITE, LINUX_UK_BASE_MAGIC,
+ UKP_FUNC_ID_CHECK_VERSION,
+ sizeof(struct uku_version_check_args)))
+ return kbase_legacy_ioctl(filp, cmd, arg);
+
+ /* Only these ioctls are available until setup is complete */
+ switch (cmd) {
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
+ kbase_api_handshake,
+ struct kbase_ioctl_version_check);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
+ kbase_api_set_flags,
+ struct kbase_ioctl_set_flags);
+ }
+
+ /* Block call until version handshake and setup is complete */
+ if (kctx->api_version == 0 || !atomic_read(&kctx->setup_complete))
+ return -EINVAL;
+
+ /* Normal ioctls */
+ switch (cmd) {
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
+ kbase_api_job_submit,
+ struct kbase_ioctl_job_submit);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
+ kbase_api_get_gpuprops,
+ struct kbase_ioctl_get_gpuprops);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
+ kbase_api_post_term);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
+ kbase_api_mem_alloc,
+ union kbase_ioctl_mem_alloc);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
+ kbase_api_mem_query,
+ union kbase_ioctl_mem_query);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
+ kbase_api_mem_free,
+ struct kbase_ioctl_mem_free);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
+ kbase_api_hwcnt_reader_setup,
+ struct kbase_ioctl_hwcnt_reader_setup);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
+ kbase_api_hwcnt_enable,
+ struct kbase_ioctl_hwcnt_enable);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
+ kbase_api_hwcnt_dump);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
+ kbase_api_hwcnt_clear);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
+ kbase_api_disjoint_query,
+ struct kbase_ioctl_disjoint_query);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
+ kbase_api_get_ddk_version,
+ struct kbase_ioctl_get_ddk_version);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
+ kbase_api_mem_jit_init,
+ struct kbase_ioctl_mem_jit_init);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
+ kbase_api_mem_sync,
+ struct kbase_ioctl_mem_sync);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
+ kbase_api_mem_find_cpu_offset,
+ union kbase_ioctl_mem_find_cpu_offset);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
+ kbase_api_get_context_id,
+ struct kbase_ioctl_get_context_id);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
+ kbase_api_tlstream_acquire,
+ struct kbase_ioctl_tlstream_acquire);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
+ kbase_api_tlstream_flush);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
+ kbase_api_mem_commit,
+ struct kbase_ioctl_mem_commit);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
+ kbase_api_mem_alias,
+ union kbase_ioctl_mem_alias);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
+ kbase_api_mem_import,
+ union kbase_ioctl_mem_import);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
+ kbase_api_mem_flags_change,
+ struct kbase_ioctl_mem_flags_change);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
+ kbase_api_stream_create,
+ struct kbase_ioctl_stream_create);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
+ kbase_api_fence_validate,
+ struct kbase_ioctl_fence_validate);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_PROFILING_CONTROLS,
+ kbase_api_get_profiling_controls,
+ struct kbase_ioctl_get_profiling_controls);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
+ kbase_api_mem_profile_add,
+ struct kbase_ioctl_mem_profile_add);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
+ kbase_api_soft_event_update,
+ struct kbase_ioctl_soft_event_update);
+
+#if MALI_UNIT_TEST
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
+ kbase_api_tlstream_test,
+ struct kbase_ioctl_tlstream_test);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
+ kbase_api_tlstream_stats,
+ struct kbase_ioctl_tlstream_stats);
+#endif
+ }
+
+ dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d\n", cmd, _IOC_NR(cmd));
+
+ return -ENOIOCTLCMD;
+}
+
+static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct base_jd_event_v2 uevent;
+ int out_count = 0;
+
+ if (count < sizeof(uevent))
+ return -ENOBUFS;
+
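+ /* Copy as many queued job events as will fit in the user buffer. The
+ * dequeue call returns non-zero when no event is pending, in which
+ * case we return whatever has been copied so far, fail with -EAGAIN
+ * for non-blocking readers, or block until an event is posted.
+ */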
+ do {
+ while (kbase_event_dequeue(kctx, &uevent)) {
+ if (out_count > 0)
+ goto out;
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(kctx->event_queue,
+ kbase_event_pending(kctx)) != 0)
+ return -ERESTARTSYS;
+ }
+ if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
+ if (out_count == 0)
+ return -EPIPE;
+ goto out;
+ }
+
+ if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
+ return -EFAULT;
+
+ buf += sizeof(uevent);
+ out_count++;
+ count -= sizeof(uevent);
+ } while (count >= sizeof(uevent));
+
+ out:
+ return out_count * sizeof(uevent);
+}
+
+static unsigned int kbase_poll(struct file *filp, poll_table *wait)
+{
+ struct kbase_context *kctx = filp->private_data;
+
+ poll_wait(filp, &kctx->event_queue, wait);
+ if (kbase_event_pending(kctx))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+void kbase_event_wakeup(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+ wake_up_interruptible(&kctx->event_queue);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+
+static int kbase_check_flags(int flags)
+{
+ /* Enforce that the file is opened with O_CLOEXEC so that the file
+ * descriptor is always closed across execve() and cannot leak into
+ * exec'd programs.
+ */
+ if (0 == (flags & O_CLOEXEC))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * align_and_check - Align the specified pointer to the provided alignment and
+ * check that it is still in range.
+ * @gap_end: Highest possible start address for allocation (end of gap in
+ * address space)
+ * @gap_start: Start address of current memory area / gap in address space
+ * @info: vm_unmapped_area_info structure passed to caller, containing
+ * alignment, length and limits for the allocation
+ * @is_shader_code: True if the allocation is for shader code (which has
+ * additional alignment requirements)
+ *
+ * Return: true if gap_end is now aligned correctly and is still in range,
+ * false otherwise
+ */
+static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
+ struct vm_unmapped_area_info *info, bool is_shader_code)
+{
+ /* Compute highest gap address at the desired alignment */
+ (*gap_end) -= info->length;
+ (*gap_end) -= (*gap_end - info->align_offset) & info->align_mask;
+
+ if (is_shader_code) {
+ /* Shader code must not start or end exactly on a 4GB boundary; if
+ * the aligned candidate does, step it down by one alignment unit
+ * (or by the allocation length) and re-check below.
+ */
+ if (0 == (*gap_end & BASE_MEM_MASK_4GB))
+ (*gap_end) -= (info->align_offset ? info->align_offset :
+ info->length);
+ if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB))
+ (*gap_end) -= (info->align_offset ? info->align_offset :
+ info->length);
+
+ if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
+ info->length) & BASE_MEM_MASK_4GB))
+ return false;
+ }
+
+ if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
+ return false;
+
+ return true;
+}
+
+/* The following function is taken from the kernel and just
+ * renamed. As it's not exported to modules we must copy-paste it here.
+ */
+
+static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
+ *info, bool is_shader_code)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+ /* Adjust search length to account for worst case alignment overhead */
+ length = info->length + info->align_mask;
+ if (length < info->length)
+ return -ENOMEM;
+
+ /*
+ * Adjust search limits by the desired length.
+ * See implementation comment at top of unmapped_area().
+ */
+ gap_end = info->high_limit;
+ if (gap_end < length)
+ return -ENOMEM;
+ high_limit = gap_end - length;
+
+ if (info->low_limit > high_limit)
+ return -ENOMEM;
+ low_limit = info->low_limit + length;
+
+ /* Check highest gap, which does not precede any rbtree node */
+ gap_start = mm->highest_vm_end;
+ if (gap_start <= high_limit) {
+ if (align_and_check(&gap_end, gap_start, info, is_shader_code))
+ return gap_end;
+ }
+
+ /* Check if rbtree root looks promising */
+ if (RB_EMPTY_ROOT(&mm->mm_rb))
+ return -ENOMEM;
+ vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+ if (vma->rb_subtree_gap < length)
+ return -ENOMEM;
+
+ while (true) {
+ /* Visit right subtree if it looks promising */
+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+ struct vm_area_struct *right =
+ rb_entry(vma->vm_rb.rb_right,
+ struct vm_area_struct, vm_rb);
+ if (right->rb_subtree_gap >= length) {
+ vma = right;
+ continue;
+ }
+ }
+
+check_current:
+ /* Check if current node has a suitable gap */
+ gap_end = vma->vm_start;
+ if (gap_end < low_limit)
+ return -ENOMEM;
+ if (gap_start <= high_limit && gap_end - gap_start >= length) {
+ /* We found a suitable gap. Clip it with the original
+ * high_limit. */
+ if (gap_end > info->high_limit)
+ gap_end = info->high_limit;
+
+ if (align_and_check(&gap_end, gap_start, info,
+ is_shader_code))
+ return gap_end;
+ }
+
+ /* Visit left subtree if it looks promising */
+ if (vma->vm_rb.rb_left) {
+ struct vm_area_struct *left =
+ rb_entry(vma->vm_rb.rb_left,
+ struct vm_area_struct, vm_rb);
+ if (left->rb_subtree_gap >= length) {
+ vma = left;
+ continue;
+ }
+ }
+
+ /* Go back up the rbtree to find next candidate node */
+ while (true) {
+ struct rb_node *prev = &vma->vm_rb;
+ if (!rb_parent(prev))
+ return -ENOMEM;
+ vma = rb_entry(rb_parent(prev),
+ struct vm_area_struct, vm_rb);
+ if (prev == vma->vm_rb.rb_right) {
+ gap_start = vma->vm_prev ?
+ vma->vm_prev->vm_end : 0;
+ goto check_current;
+ }
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static unsigned long kbase_get_unmapped_area(struct file *filp,
+ const unsigned long addr, const unsigned long len,
+ const unsigned long pgoff, const unsigned long flags)
+{
+ /* Based on get_unmapped_area(), but simplified slightly because some
+ * values are known in advance. */
+ struct kbase_context *kctx = filp->private_data;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
+ unsigned long align_offset = 0;
+ unsigned long align_mask = 0;
+ unsigned long high_limit = mm->mmap_base;
+ unsigned long low_limit = PAGE_SIZE;
+ int cpu_va_bits = BITS_PER_LONG;
+ int gpu_pc_bits =
+ kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+ bool is_shader_code = false;
+ unsigned long ret;
+
+ /* err on fixed address */
+ if ((flags & MAP_FIXED) || addr)
+ return -EINVAL;
+
+#ifdef CONFIG_64BIT
+ /* too big? */
+ if (len > TASK_SIZE - SZ_2M)
+ return -ENOMEM;
+
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+
+ if (kbase_hw_has_feature(kctx->kbdev,
+ BASE_HW_FEATURE_33BIT_VA)) {
+ high_limit = kctx->same_va_end << PAGE_SHIFT;
+ } else {
+ high_limit = min_t(unsigned long, mm->mmap_base,
+ (kctx->same_va_end << PAGE_SHIFT));
+ if (len >= SZ_2M) {
+ align_offset = SZ_2M;
+ align_mask = SZ_2M - 1;
+ }
+ }
+
+ low_limit = SZ_2M;
+ } else {
+ cpu_va_bits = 32;
+ }
+#endif /* CONFIG_64BIT */
+ if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
+ (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
+ int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+
+ if (!kctx->pending_regions[cookie])
+ return -EINVAL;
+
+ if (!(kctx->pending_regions[cookie]->flags &
+ KBASE_REG_GPU_NX)) {
+ if (cpu_va_bits > gpu_pc_bits) {
+ align_offset = 1ULL << gpu_pc_bits;
+ align_mask = align_offset - 1;
+ is_shader_code = true;
+ }
+ }
+#ifndef CONFIG_64BIT
+ } else {
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff,
+ flags);
+#endif
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = low_limit;
+ info.high_limit = high_limit;
+ info.align_offset = align_offset;
+ info.align_mask = align_mask;
+
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code);
+
+ if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
+ high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
+ /* Retry above mmap_base */
+ info.low_limit = mm->mmap_base;
+ info.high_limit = min_t(u64, TASK_SIZE,
+ (kctx->same_va_end << PAGE_SHIFT));
+
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code);
+ }
+
+ return ret;
+}
+
+static const struct file_operations kbase_fops = {
+ .owner = THIS_MODULE,
+ .open = kbase_open,
+ .release = kbase_release,
+ .read = kbase_read,
+ .poll = kbase_poll,
+ .unlocked_ioctl = kbase_ioctl,
+ .compat_ioctl = kbase_ioctl,
+ .mmap = kbase_mmap,
+ .check_flags = kbase_check_flags,
+ .get_unmapped_area = kbase_get_unmapped_area,
+};
+
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
+{
+ writel(value, kbdev->reg + offset);
+}
+
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
+{
+ return readl(kbdev->reg + offset);
+}
+#endif /* !CONFIG_MALI_NO_MALI */
+
+/**
+ * show_policy - Show callback for the power_policy sysfs file.
+ *
+ * This function is called to get the contents of the power_policy sysfs
+ * file. This is a list of the available policies with the currently active one
+ * surrounded by square brackets.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_policy *current_policy;
+ const struct kbase_pm_policy *const *policy_list;
+ int policy_count;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ current_policy = kbase_pm_get_policy(kbdev);
+
+ policy_count = kbase_pm_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+ if (policy_list[i] == current_policy)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+ }
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/**
+ * set_policy - Store callback for the power_policy sysfs file.
+ *
+ * This function is called when the power_policy sysfs file is written to.
+ * It matches the requested policy against the available policies and if a
+ * matching policy is found calls kbase_pm_set_policy() to change the
+ * policy.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_policy *new_policy = NULL;
+ const struct kbase_pm_policy *const *policy_list;
+ int policy_count;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ policy_count = kbase_pm_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count; i++) {
+ if (sysfs_streq(policy_list[i]->name, buf)) {
+ new_policy = policy_list[i];
+ break;
+ }
+ }
+
+ if (!new_policy) {
+ dev_err(dev, "power_policy: policy not found\n");
+ return -EINVAL;
+ }
+
+ kbase_pm_set_policy(kbdev, new_policy);
+
+ return count;
+}
+
+/*
+ * The sysfs file power_policy.
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
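+
+/*
+ * Example interaction from user space (illustrative only: the sysfs path and
+ * the set of policy names depend on how the device is probed and on which
+ * policies are compiled into the driver):
+ *
+ *   $ cat /sys/devices/platform/<gpu>/power_policy
+ *   [demand] coarse_demand always_on
+ *   $ echo always_on > /sys/devices/platform/<gpu>/power_policy
+ */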
+
+/**
+ * show_ca_policy - Show callback for the core_availability_policy sysfs file.
+ *
+ * This function is called to get the contents of the core_availability_policy
+ * sysfs file. This is a list of the available policies with the currently
+ * active one surrounded by square brackets.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_ca_policy *current_policy;
+ const struct kbase_pm_ca_policy *const *policy_list;
+ int policy_count;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ current_policy = kbase_pm_ca_get_policy(kbdev);
+
+ policy_count = kbase_pm_ca_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+ if (policy_list[i] == current_policy)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+ }
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/**
+ * set_ca_policy - Store callback for the core_availability_policy sysfs file.
+ *
+ * This function is called when the core_availability_policy sysfs file is
+ * written to. It matches the requested policy against the available policies
+ * and if a matching policy is found calls kbase_pm_set_policy() to change
+ * the policy.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_ca_policy *new_policy = NULL;
+ const struct kbase_pm_ca_policy *const *policy_list;
+ int policy_count;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ policy_count = kbase_pm_ca_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count; i++) {
+ if (sysfs_streq(policy_list[i]->name, buf)) {
+ new_policy = policy_list[i];
+ break;
+ }
+ }
+
+ if (!new_policy) {
+ dev_err(dev, "core_availability_policy: policy not found\n");
+ return -EINVAL;
+ }
+
+ kbase_pm_ca_set_policy(kbdev, new_policy);
+
+ return count;
+}
+
+/*
+ * The sysfs file core_availability_policy
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
+
+/**
+ * show_core_mask - Show callback for the core_mask sysfs file.
+ *
+ * This function is called to get the contents of the core_mask sysfs file.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS0) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[0]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS1) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[1]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS2) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[2]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Available core mask : 0x%llX\n",
+ kbdev->gpu_props.props.raw_props.shader_present);
+
+ return ret;
+}
+
+/**
+ * set_core_mask - Store callback for the core_mask sysfs file.
+ *
+ * This function is called when the core_mask sysfs file is written to.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ u64 new_core_mask[3];
+ int items;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%llx %llx %llx",
+ &new_core_mask[0], &new_core_mask[1],
+ &new_core_mask[2]);
+
+ if (items == 1)
+ new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
+
+ if (items == 1 || items == 3) {
+ u64 shader_present =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ u64 group0_core_mask =
+ kbdev->gpu_props.props.coherency_info.group[0].
+ core_mask;
+
+ if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
+ !(new_core_mask[0] & group0_core_mask) ||
+ (new_core_mask[1] & shader_present) !=
+ new_core_mask[1] ||
+ !(new_core_mask[1] & group0_core_mask) ||
+ (new_core_mask[2] & shader_present) !=
+ new_core_mask[2] ||
+ !(new_core_mask[2] & group0_core_mask)) {
+ dev_err(dev, "power_policy: invalid core specification\n");
+ return -EINVAL;
+ }
+
+ if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
+ kbdev->pm.debug_core_mask[1] !=
+ new_core_mask[1] ||
+ kbdev->pm.debug_core_mask[2] !=
+ new_core_mask[2]) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
+ new_core_mask[1], new_core_mask[2]);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
+ return count;
+ }
+
+ dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
+ "Use format <core_mask>\n"
+ "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
+ return -EINVAL;
+}
+
+/*
+ * The sysfs file core_mask.
+ *
+ * This is used to restrict shader core availability for debugging purposes.
+ * Reading it will show the current core mask and the mask of cores available.
+ * Writing to it will set the current core mask.
+ */
+static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
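+
+/*
+ * Example usage (illustrative only; the sysfs path depends on how the device
+ * is probed):
+ *
+ *   # one mask applied to all three job slots
+ *   $ echo 0xF > /sys/devices/platform/<gpu>/core_mask
+ *   # or one mask per job slot (JS0 JS1 JS2)
+ *   $ echo "0xF 0xF 0x3" > /sys/devices/platform/<gpu>/core_mask
+ *
+ * Each mask must be a subset of the available shader cores reported when
+ * reading the file, and must include at least one core from core group 0.
+ */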
+
+/**
+ * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This allows setting the timeout for software jobs. Soft event wait jobs
+ * that are still waiting when this period expires will be cancelled, while
+ * soft fence wait jobs will print debug information if the fence debug
+ * feature is enabled.
+ *
+ * This is expressed in milliseconds.
+ *
+ * Return: count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_soft_job_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int soft_job_timeout_ms;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
+ (soft_job_timeout_ms <= 0))
+ return -EINVAL;
+
+ atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+ soft_job_timeout_ms);
+
+ return count;
+}
+
+/**
+ * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * This will return the timeout for the software jobs.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_soft_job_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char * const buf)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n",
+ atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+}
+
+static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
+ show_soft_job_timeout, set_soft_job_timeout);
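+
+/*
+ * Example (illustrative): set the soft job timeout to three seconds
+ *
+ *   $ echo 3000 > /sys/devices/platform/<gpu>/soft_job_timeout
+ */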
+
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
+ int default_ticks, u32 old_ticks)
+{
+ if (timeout_ms > 0) {
+ u64 ticks = timeout_ms * 1000000ULL;
+ do_div(ticks, kbdev->js_data.scheduling_period_ns);
+ if (!ticks)
+ return 1;
+ return ticks;
+ } else if (timeout_ms < 0) {
+ return default_ticks;
+ } else {
+ return old_ticks;
+ }
+}
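+
+/*
+ * For instance, with a 100 ms (100000000 ns) scheduling period a timeout of
+ * 500 ms converts to 500 * 1000000 / 100000000 = 5 ticks. A positive timeout
+ * that rounds down to zero is clamped to one tick, a negative timeout selects
+ * the supplied default, and zero keeps the previous tick count.
+ */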
+
+/**
+ * set_js_timeouts - Store callback for the js_timeouts sysfs file.
+ *
+ * This function is called when the js_timeouts sysfs file is written to. The
+ * file takes eight values separated by whitespace, corresponding to
+ * %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_SS,
+ * %JS_HARD_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS,
+ * %JS_RESET_TICKS_CL and %JS_RESET_TICKS_DUMPING (in that order), with the
+ * difference that the js_timeouts values are expressed in MILLISECONDS.
+ *
+ * The js_timeouts sysfs file allows the values currently in use by the job
+ * scheduler to be overridden. Write 0 to leave a value unchanged and -1 to
+ * restore its default.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int items;
+ long js_soft_stop_ms;
+ long js_soft_stop_ms_cl;
+ long js_hard_stop_ms_ss;
+ long js_hard_stop_ms_cl;
+ long js_hard_stop_ms_dumping;
+ long js_reset_ms_ss;
+ long js_reset_ms_cl;
+ long js_reset_ms_dumping;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
+ &js_soft_stop_ms, &js_soft_stop_ms_cl,
+ &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+ &js_hard_stop_ms_dumping, &js_reset_ms_ss,
+ &js_reset_ms_cl, &js_reset_ms_dumping);
+
+ if (items == 8) {
+ struct kbasep_js_device_data *js_data = &kbdev->js_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
+ js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
+ default, js_data->ticks_name); \
+ dev_dbg(kbdev->dev, "Overriding " #ticks_name \
+ " with %lu ticks (%lu ms)\n", \
+ (unsigned long)js_data->ticks_name, \
+ ms_name); \
+ } while (0)
+
+ UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
+ DEFAULT_JS_SOFT_STOP_TICKS);
+ UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
+ DEFAULT_JS_SOFT_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+ DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
+ DEFAULT_JS_HARD_STOP_TICKS_SS);
+ UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
+ DEFAULT_JS_HARD_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_dumping,
+ js_hard_stop_ms_dumping,
+ DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
+ UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+ DEFAULT_JS_RESET_TICKS_SS_8408 :
+ DEFAULT_JS_RESET_TICKS_SS);
+ UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
+ DEFAULT_JS_RESET_TICKS_CL);
+ UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
+ DEFAULT_JS_RESET_TICKS_DUMPING);
+
+ kbase_js_set_timeouts(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return count;
+ }
+
+ dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
+ "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
+ "Write 0 for no change, -1 to restore default timeout\n");
+ return -EINVAL;
+}
+
+static unsigned long get_js_timeout_in_ms(
+ u32 scheduling_period_ns,
+ u32 ticks)
+{
+ u64 ms = (u64)ticks * scheduling_period_ns;
+
+ do_div(ms, 1000000UL);
+ return ms;
+}
+
+/**
+ * show_js_timeouts - Show callback for the js_timeouts sysfs file.
+ *
+ * This function is called to get the contents of the js_timeouts sysfs
+ * file. It returns the values last written to the js_timeouts sysfs file,
+ * expressed in milliseconds. If the file has not been written to yet, the
+ * settings currently in use are shown.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+ unsigned long js_soft_stop_ms;
+ unsigned long js_soft_stop_ms_cl;
+ unsigned long js_hard_stop_ms_ss;
+ unsigned long js_hard_stop_ms_cl;
+ unsigned long js_hard_stop_ms_dumping;
+ unsigned long js_reset_ms_ss;
+ unsigned long js_reset_ms_cl;
+ unsigned long js_reset_ms_dumping;
+ u32 scheduling_period_ns;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
+
+#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
+ scheduling_period_ns, \
+ kbdev->js_data.name)
+
+ js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
+ js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
+ js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
+ js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
+ js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
+ js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
+ js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
+ js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef GET_TIMEOUT
+
+ ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
+ js_soft_stop_ms, js_soft_stop_ms_cl,
+ js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+ js_hard_stop_ms_dumping, js_reset_ms_ss,
+ js_reset_ms_cl, js_reset_ms_dumping);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/*
+ * The sysfs file js_timeouts.
+ *
+ * This is used to override the current job scheduler values for
+ * JS_SOFT_STOP_TICKS
+ * JS_SOFT_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_SS
+ * JS_HARD_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_DUMPING
+ * JS_RESET_TICKS_SS
+ * JS_RESET_TICKS_CL
+ * JS_RESET_TICKS_DUMPING.
+ */
+static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
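+
+/*
+ * Example write (illustrative): keep the soft-stop timeouts unchanged,
+ * restore the hard-stop defaults and set 3 s GPU reset timeouts
+ *
+ *   $ echo "0 0 -1 -1 -1 3000 3000 3000" > \
+ *         /sys/devices/platform/<gpu>/js_timeouts
+ */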
+
+static u32 get_new_js_timeout(
+ u32 old_period,
+ u32 old_ticks,
+ u32 new_scheduling_period_ns)
+{
+ u64 ticks = (u64)old_period * (u64)old_ticks;
+
+ do_div(ticks, new_scheduling_period_ns);
+
+ return ticks ? ticks : 1;
+}
+
+/**
+ * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
+ * file
+ * @dev: The device the sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the js_scheduling_period sysfs file is written
+ * to. It checks the data written, and if valid updates the js_scheduling_period
+ * value
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_scheduling_period(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ unsigned int js_scheduling_period;
+ u32 new_scheduling_period_ns;
+ u32 old_period;
+ struct kbasep_js_device_data *js_data;
+ unsigned long flags;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ js_data = &kbdev->js_data;
+
+ ret = kstrtouint(buf, 0, &js_scheduling_period);
+ if (ret || !js_scheduling_period) {
+ dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
+ "Use format <js_scheduling_period_ms>\n");
+ return -EINVAL;
+ }
+
+ new_scheduling_period_ns = js_scheduling_period * 1000000;
+
+ /* Update scheduling timeouts */
+ mutex_lock(&js_data->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Rescale each timeout so that it keeps roughly the same wall-clock
+ * duration under the new scheduling period, using the previously
+ * configured period as the conversion base.
+ */
+ old_period = js_data->scheduling_period_ns;
+
+#define SET_TIMEOUT(name) \
+ (js_data->name = get_new_js_timeout(\
+ old_period, \
+ kbdev->js_data.name, \
+ new_scheduling_period_ns))
+
+ SET_TIMEOUT(soft_stop_ticks);
+ SET_TIMEOUT(soft_stop_ticks_cl);
+ SET_TIMEOUT(hard_stop_ticks_ss);
+ SET_TIMEOUT(hard_stop_ticks_cl);
+ SET_TIMEOUT(hard_stop_ticks_dumping);
+ SET_TIMEOUT(gpu_reset_ticks_ss);
+ SET_TIMEOUT(gpu_reset_ticks_cl);
+ SET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef SET_TIMEOUT
+
+ js_data->scheduling_period_ns = new_scheduling_period_ns;
+
+ kbase_js_set_timeouts(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_data->runpool_mutex);
+
+ dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
+ js_scheduling_period);
+
+ return count;
+}
+
+/**
+ * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
+ * entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current period used for the JS scheduling
+ * period.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_scheduling_period(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ u32 period;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ period = kbdev->js_data.scheduling_period_ns;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+ period / 1000000);
+
+ return ret;
+}
+
+static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
+ show_js_scheduling_period, set_js_scheduling_period);
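+
+/*
+ * Example (illustrative): read the current scheduling period and then set it
+ * to 50 ms; the per-state tick counts above are rescaled so that the
+ * wall-clock timeouts stay roughly the same
+ *
+ *   $ cat /sys/devices/platform/<gpu>/js_scheduling_period
+ *   100
+ *   $ echo 50 > /sys/devices/platform/<gpu>/js_scheduling_period
+ */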
+
+#if !MALI_CUSTOMER_RELEASE
+/**
+ * set_force_replay - Store callback for the force_replay sysfs file.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if (!strncmp("limit=", buf, MIN(6, count))) {
+ unsigned int force_replay_limit;
+ int items = sscanf(buf, "limit=%u", &force_replay_limit);
+
+ if (items == 1) {
+ kbdev->force_replay_random = false;
+ kbdev->force_replay_limit = force_replay_limit;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ }
+ } else if (!strncmp("random_limit", buf, MIN(12, count))) {
+ kbdev->force_replay_random = true;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
+ kbdev->force_replay_random = false;
+ kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ } else if (!strncmp("core_req=", buf, MIN(9, count))) {
+ unsigned int core_req;
+ int items = sscanf(buf, "core_req=%x", &core_req);
+
+ if (items == 1) {
+ kbdev->force_replay_core_req = (base_jd_core_req)core_req;
+
+ return count;
+ }
+ }
+ dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
+ return -EINVAL;
+}
+
+/**
+ * show_force_replay - Show callback for the force_replay sysfs file.
+ *
+ * This function is called to get the contents of the force_replay sysfs
+ * file. It returns the last set value written to the force_replay sysfs file.
+ * If the file didn't get written yet, the values will be 0.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_force_replay(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if (kbdev->force_replay_random)
+ ret = scnprintf(buf, PAGE_SIZE,
+ "limit=0\nrandom_limit\ncore_req=%x\n",
+ kbdev->force_replay_core_req);
+ else
+ ret = scnprintf(buf, PAGE_SIZE,
+ "limit=%u\nnorandom_limit\ncore_req=%x\n",
+ kbdev->force_replay_limit,
+ kbdev->force_replay_core_req);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/*
+ * The sysfs file force_replay.
+ */
+static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
+ set_force_replay);
+#endif /* !MALI_CUSTOMER_RELEASE */
+
+#ifdef CONFIG_MALI_DEBUG
+static ssize_t set_js_softstop_always(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int softstop_always;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &softstop_always);
+ if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
+ dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
+ "Use format <soft_stop_always>\n");
+ return -EINVAL;
+ }
+
+ kbdev->js_data.softstop_always = (bool) softstop_always;
+ dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
+ (kbdev->js_data.softstop_always) ?
+ "Enabled" : "Disabled");
+ return count;
+}
+
+static ssize_t show_js_softstop_always(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/*
+ * By default, soft-stops are disabled when only a single context is present.
+ * The ability to enable soft-stop when only a single context is present can be
+ * used for debug and unit-testing purposes.
+ * (see the CL t6xx_stress_1 unit test for an example where this feature is used.)
+ */
+static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_DEBUG
+typedef void (kbasep_debug_command_func) (struct kbase_device *);
+
+enum kbasep_debug_command_code {
+ KBASEP_DEBUG_COMMAND_DUMPTRACE,
+
+ /* This must be the last enum */
+ KBASEP_DEBUG_COMMAND_COUNT
+};
+
+struct kbasep_debug_command {
+ char *str;
+ kbasep_debug_command_func *func;
+};
+
+/* Debug commands supported by the driver */
+static const struct kbasep_debug_command debug_commands[] = {
+ {
+ .str = "dumptrace",
+ .func = &kbasep_trace_dump,
+ }
+};
+
+/**
+ * show_debug - Show callback for the debug_command sysfs file.
+ *
+ * This function is called to get the contents of the debug_command sysfs
+ * file. This is a list of the available debug commands, separated by newlines.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/**
+ * issue_debug - Store callback for the debug_command sysfs file.
+ *
+ * This function is called when the debug_command sysfs file is written to.
+ * It matches the requested command against the available commands, and if
+ * a matching command is found calls the associated function from
+ * @debug_commands to issue the command.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
+ if (sysfs_streq(debug_commands[i].str, buf)) {
+ debug_commands[i].func(kbdev);
+ return count;
+ }
+ }
+
+ /* Debug Command not found */
+ dev_err(dev, "debug_command: command not known\n");
+ return -EINVAL;
+}
+
+/* The sysfs file debug_command.
+ *
+ * This is used to issue general debug commands to the device driver.
+ * Reading it will produce a list of debug commands, separated by newlines.
+ * Writing to it with one of those commands will issue said command.
+ */
+static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
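+
+/*
+ * Example (illustrative): list the supported commands and dump the driver
+ * trace
+ *
+ *   $ cat /sys/devices/platform/<gpu>/debug_command
+ *   dumptrace
+ *   $ echo dumptrace > /sys/devices/platform/<gpu>/debug_command
+ */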
+#endif /* CONFIG_MALI_DEBUG */
+
+/**
+ * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer to receive the GPU information.
+ *
+ * This function is called to get a description of the present Mali
+ * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
+ * number of cores, the hardware version and the raw product id. For
+ * example
+ *
+ * Mali-T60x 4 cores r0p0 0x6956
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t kbase_show_gpuinfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const struct gpu_product_id_name {
+ unsigned id;
+ char *name;
+ } gpu_product_id_names[] = {
+ { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
+ { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
+ { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
+ { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
+ { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
+ { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
+ { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
+ { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
+ { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-G71" },
+ { .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-THEx" },
+ { .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-G51" },
+ };
+ const char *product_name = "(Unknown Mali GPU)";
+ struct kbase_device *kbdev;
+ u32 gpu_id;
+ unsigned product_id, product_id_mask;
+ unsigned i;
+ bool is_new_format;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
+ product_id_mask =
+ (is_new_format ?
+ GPU_ID2_PRODUCT_MODEL :
+ GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
+ const struct gpu_product_id_name *p = &gpu_product_id_names[i];
+
+ if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
+ (p->id & product_id_mask) ==
+ (product_id & product_id_mask)) {
+ product_name = p->name;
+ break;
+ }
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n",
+ product_name, kbdev->gpu_props.num_cores,
+ (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+ product_id);
+}
+static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
+
+/**
+ * set_dvfs_period - Store callback for the dvfs_period sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the dvfs_period sysfs file is written to. It
+ * checks the data written, and if valid updates the DVFS period variable,
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_dvfs_period(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int dvfs_period;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &dvfs_period);
+ if (ret || dvfs_period <= 0) {
+ dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
+ "Use format <dvfs_period_ms>\n");
+ return -EINVAL;
+ }
+
+ kbdev->pm.dvfs_period = dvfs_period;
+ dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
+
+ return count;
+}
+
+/**
+ * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current period used for the DVFS sample
+ * timer.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_dvfs_period(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
+
+ return ret;
+}
+
+static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
+ set_dvfs_period);
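+
+/*
+ * Example (illustrative): sample DVFS utilisation every 50 ms
+ *
+ *   $ echo 50 > /sys/devices/platform/<gpu>/dvfs_period
+ */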
+
+/**
+ * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the pm_poweroff sysfs file is written to.
+ *
+ * This file contains three values separated by whitespace. The values
+ * are gpu_poweroff_time (the period of the poweroff timer, in ns),
+ * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
+ * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
+ * ticks before the GPU is powered off), in that order.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_pm_poweroff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int items;
+ u64 gpu_poweroff_time;
+ unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
+ &poweroff_shader_ticks,
+ &poweroff_gpu_ticks);
+ if (items != 3) {
+ dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
+ "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
+ return -EINVAL;
+ }
+
+ kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
+ kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
+ kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
+
+ return count;
+}
+
+/**
+ * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current power-off timer settings: the
+ * power-off timer period and the shader and GPU power-off tick counts.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_pm_poweroff(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
+ ktime_to_ns(kbdev->pm.gpu_poweroff_time),
+ kbdev->pm.poweroff_shader_ticks,
+ kbdev->pm.poweroff_gpu_ticks);
+
+ return ret;
+}
+
+static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
+ set_pm_poweroff);
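+
+/*
+ * Example (illustrative): run the power-off timer every 500 us (500000 ns)
+ * and power down idle shaders, and then the whole GPU, after two ticks each
+ *
+ *   $ echo "500000 2 2" > /sys/devices/platform/<gpu>/pm_poweroff
+ */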
+
+/**
+ * set_reset_timeout - Store callback for the reset_timeout sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the reset_timeout sysfs file is written to. It
+ * checks the data written, and if valid updates the reset timeout.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_reset_timeout(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int reset_timeout;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &reset_timeout);
+ if (ret || reset_timeout <= 0) {
+ dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
+ "Use format <reset_timeout_ms>\n");
+ return -EINVAL;
+ }
+
+ kbdev->reset_timeout_ms = reset_timeout;
+ dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
+
+ return count;
+}
+
+/**
+ * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current reset timeout.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_reset_timeout(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
+
+ return ret;
+}
+
+static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
+ set_reset_timeout);
+
+static ssize_t show_mem_pool_size(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+ kbase_mem_pool_size(&kbdev->mem_pool));
+
+ return ret;
+}
+
+static ssize_t set_mem_pool_size(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ size_t new_size;
+ int err;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 0, (unsigned long *)&new_size);
+ if (err)
+ return err;
+
+ kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
+
+ return count;
+}
+
+static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
+ set_mem_pool_size);
+
+static ssize_t show_mem_pool_max_size(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+ kbase_mem_pool_max_size(&kbdev->mem_pool));
+
+ return ret;
+}
+
+static ssize_t set_mem_pool_max_size(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ size_t new_max_size;
+ int err;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
+ if (err)
+ return -EINVAL;
+
+ kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
+
+ return count;
+}
+
+static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
+ set_mem_pool_max_size);
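+
+/*
+ * Illustrative use of the memory pool attributes (path assumes the device is
+ * registered as mali0; sizes are in pages):
+ *
+ *   # cat /sys/class/misc/mali0/device/mem_pool_size
+ *   # echo 0 > /sys/class/misc/mali0/device/mem_pool_size
+ *   # echo 4096 > /sys/class/misc/mali0/device/mem_pool_max_size
+ *
+ * Writing mem_pool_size trims the pool down to the requested number of
+ * pages; writing mem_pool_max_size caps how large the pool may grow.
+ */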
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Number of entries in serialize_jobs_settings[] */
+#define NR_SERIALIZE_JOBS_SETTINGS 5
+/* Maximum string length in serialize_jobs_settings[].name */
+#define MAX_SERIALIZE_JOBS_NAME_LEN 16
+
+static struct
+{
+ char *name;
+ u8 setting;
+} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
+ {"none", 0},
+ {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
+ {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
+ {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
+ {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
+ KBASE_SERIALIZE_RESET}
+};
+
+/**
+ * kbasep_serialize_jobs_seq_show - Show callback for the serialize_jobs debugfs
+ * file
+ * @sfile: seq_file pointer
+ * @data: Private callback data
+ *
+ * This function is called to get the contents of the serialize_jobs debugfs
+ * file. This is a list of the available settings with the currently active one
+ * surrounded by square brackets.
+ *
+ * Return: 0 on success, or an error code on error
+ */
+static int kbasep_serialize_jobs_seq_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_device *kbdev = sfile->private;
+ int i;
+
+ CSTD_UNUSED(data);
+
+ for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+ if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
+ seq_printf(sfile, "[%s] ",
+ serialize_jobs_settings[i].name);
+ else
+ seq_printf(sfile, "%s ",
+ serialize_jobs_settings[i].name);
+ }
+
+ seq_puts(sfile, "\n");
+
+ return 0;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
+ * debugfs file.
+ * @file: File pointer
+ * @ubuf: User buffer containing data to store
+ * @count: Number of bytes in user buffer
+ * @ppos: File position
+ *
+ * This function is called when the serialize_jobs debugfs file is written to.
+ * It matches the requested setting against the available settings and if a
+ * matching setting is found updates kbdev->serialize_jobs.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct kbase_device *kbdev = s->private;
+ char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
+ int i;
+ bool valid = false;
+
+ CSTD_UNUSED(ppos);
+
+ count = min_t(size_t, sizeof(buf) - 1, count);
+ if (copy_from_user(buf, ubuf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+ if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
+ kbdev->serialize_jobs =
+ serialize_jobs_settings[i].setting;
+ valid = true;
+ break;
+ }
+ }
+
+ if (!valid) {
+ dev_err(kbdev->dev, "serialize_jobs: invalid setting\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
+ * debugfs file
+ * @in: inode pointer
+ * @file: file pointer
+ *
+ * Return: Zero on success, error code on failure
+ */
+static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
+ struct file *file)
+{
+ return single_open(file, kbasep_serialize_jobs_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
+ .open = kbasep_serialize_jobs_debugfs_open,
+ .read = seq_read,
+ .write = kbasep_serialize_jobs_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
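+
+/*
+ * Illustrative use of the serialize_jobs debugfs file (assuming debugfs is
+ * mounted at /sys/kernel/debug and the device is named mali0):
+ *
+ *   # cat /sys/kernel/debug/mali0/serialize_jobs
+ *   [none] intra-slot inter-slot full full-reset
+ *   # echo full > /sys/kernel/debug/mali0/serialize_jobs
+ *
+ * Reading lists the available settings with the active one in brackets;
+ * writing one of the listed names updates kbdev->serialize_jobs.
+ */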
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_OF
+ struct device_node *protected_node;
+ struct platform_device *pdev;
+ struct protected_mode_device *protected_dev;
+#endif
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+ /* Use native protected ops */
+ kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+ GFP_KERNEL);
+ if (!kbdev->protected_dev)
+ return -ENOMEM;
+ kbdev->protected_dev->data = kbdev;
+ kbdev->protected_ops = &kbase_native_protected_ops;
+ kbdev->protected_mode_support = true;
+ return 0;
+ }
+
+ kbdev->protected_mode_support = false;
+
+#ifdef CONFIG_OF
+ protected_node = of_parse_phandle(kbdev->dev->of_node,
+ "protected-mode-switcher", 0);
+
+ if (!protected_node)
+ protected_node = of_parse_phandle(kbdev->dev->of_node,
+ "secure-mode-switcher", 0);
+
+ if (!protected_node) {
+ /* If protected_node cannot be looked up then we assume
+ * protected mode is not supported on this platform. */
+ dev_info(kbdev->dev, "Protected mode not available\n");
+ return 0;
+ }
+
+ pdev = of_find_device_by_node(protected_node);
+ if (!pdev)
+ return -EINVAL;
+
+ protected_dev = platform_get_drvdata(pdev);
+ if (!protected_dev)
+ return -EPROBE_DEFER;
+
+ kbdev->protected_ops = &protected_dev->ops;
+ kbdev->protected_dev = protected_dev;
+
+ if (kbdev->protected_ops) {
+ int err;
+
+ /* Make sure protected mode is disabled on startup */
+ mutex_lock(&kbdev->pm.lock);
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* protected_mode_disable() returns -EINVAL if not supported */
+ kbdev->protected_mode_support = (err != -EINVAL);
+ }
+#endif
+ return 0;
+}
+
+static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+{
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
+ kfree(kbdev->protected_dev);
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+ return 0;
+}
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+}
+#else /* CONFIG_MALI_NO_MALI */
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+ int err = -ENOMEM;
+
+ if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
+ dev_err(kbdev->dev, "Register window unavailable\n");
+ err = -EIO;
+ goto out_region;
+ }
+
+ kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
+ if (!kbdev->reg) {
+ dev_err(kbdev->dev, "Can't remap register window\n");
+ err = -EINVAL;
+ goto out_ioremap;
+ }
+
+ return 0;
+
+ out_ioremap:
+ release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ out_region:
+ return err;
+}
+
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+ if (kbdev->reg) {
+ iounmap(kbdev->reg);
+ release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ kbdev->reg = NULL;
+ kbdev->reg_start = 0;
+ kbdev->reg_size = 0;
+ }
+}
+#endif /* CONFIG_MALI_NO_MALI */
+
+static int registers_map(struct kbase_device * const kbdev)
+{
+ /* The first memory resource is the physical address of the GPU
+ * registers. */
+ struct platform_device *pdev = to_platform_device(kbdev->dev);
+ struct resource *reg_res;
+ int err;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ dev_err(kbdev->dev, "Invalid register resource\n");
+ return -ENOENT;
+ }
+
+ kbdev->reg_start = reg_res->start;
+ kbdev->reg_size = resource_size(reg_res);
+
+ err = kbase_common_reg_map(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Failed to map registers\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void registers_unmap(struct kbase_device *kbdev)
+{
+ kbase_common_reg_unmap(kbdev);
+}
+
+static int power_control_init(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ int err = 0;
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
+ if (IS_ERR_OR_NULL(kbdev->regulator)) {
+ err = PTR_ERR(kbdev->regulator);
+ kbdev->regulator = NULL;
+ if (err == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ return err;
+ }
+ dev_info(kbdev->dev,
+ "Continuing without Mali regulator control\n");
+ /* Allow probe to continue without regulator */
+ }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+ kbdev->clock = clk_get(kbdev->dev, "clk_mali");
+ if (IS_ERR_OR_NULL(kbdev->clock)) {
+ err = PTR_ERR(kbdev->clock);
+ kbdev->clock = NULL;
+ if (err == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ goto fail;
+ }
+ dev_info(kbdev->dev, "Continuing without Mali clock control\n");
+ /* Allow probe to continue without clock. */
+ } else {
+ err = clk_prepare_enable(kbdev->clock);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Failed to prepare and enable clock (%d)\n",
+ err);
+ goto fail;
+ }
+ }
+
+#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
+ /* Register the OPPs if they are available in device tree */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \
+ || defined(LSK_OPPV2_BACKPORT)
+ err = dev_pm_opp_of_add_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ err = of_init_opp_table(kbdev->dev);
+#else
+ err = 0;
+#endif /* LINUX_VERSION_CODE */
+ if (err)
+ dev_dbg(kbdev->dev, "OPP table not found\n");
+#endif /* CONFIG_OF && CONFIG_PM_OPP */
+
+ return 0;
+
+fail:
+ if (kbdev->clock != NULL) {
+ clk_put(kbdev->clock);
+ kbdev->clock = NULL;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (kbdev->regulator != NULL) {
+ regulator_put(kbdev->regulator);
+ kbdev->regulator = NULL;
+ }
+#endif
+
+ return err;
+}
+
+static void power_control_term(struct kbase_device *kbdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
+ defined(LSK_OPPV2_BACKPORT)
+ dev_pm_opp_of_remove_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ of_free_opp_table(kbdev->dev);
+#endif
+
+ if (kbdev->clock) {
+ clk_disable_unprepare(kbdev->clock);
+ clk_put(kbdev->clock);
+ kbdev->clock = NULL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ if (kbdev->regulator) {
+ regulator_put(kbdev->regulator);
+ kbdev->regulator = NULL;
+ }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#if KBASE_GPU_RESET_EN
+#include <mali_kbase_hwaccess_jm.h>
+
+static void trigger_quirks_reload(struct kbase_device *kbdev)
+{
+ kbase_pm_context_active(kbdev);
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ kbase_pm_context_idle(kbdev);
+}
+
+#define MAKE_QUIRK_ACCESSORS(type) \
+static int type##_quirks_set(void *data, u64 val) \
+{ \
+ struct kbase_device *kbdev; \
+ kbdev = (struct kbase_device *)data; \
+ kbdev->hw_quirks_##type = (u32)val; \
+ trigger_quirks_reload(kbdev); \
+ return 0;\
+} \
+\
+static int type##_quirks_get(void *data, u64 *val) \
+{ \
+ struct kbase_device *kbdev;\
+ kbdev = (struct kbase_device *)data;\
+ *val = kbdev->hw_quirks_##type;\
+ return 0;\
+} \
+DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
+ type##_quirks_set, "%llu\n")
+
+MAKE_QUIRK_ACCESSORS(sc);
+MAKE_QUIRK_ACCESSORS(tiler);
+MAKE_QUIRK_ACCESSORS(mmu);
+MAKE_QUIRK_ACCESSORS(jm);
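+
+/*
+ * Each MAKE_QUIRK_ACCESSORS(x) above expands to x_quirks_set(),
+ * x_quirks_get() and a fops_x_quirks file_operations instance, e.g.
+ * MAKE_QUIRK_ACCESSORS(sc) provides fops_sc_quirks backing the "quirks_sc"
+ * debugfs file created in kbase_device_debugfs_init(). Writing a value
+ * updates kbdev->hw_quirks_sc and triggers a GPU reset so the new setting
+ * takes effect.
+ */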
+
+#endif /* KBASE_GPU_RESET_EN */
+
+/**
+ * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
+ * @file: File object to read is for
+ * @buf: User buffer to populate with data
+ * @len: Length of user buffer
+ * @ppos: Offset within file object
+ *
+ * Retrieves the current status of protected debug mode
+ * (0 = disabled, 1 = enabled)
+ *
+ * Return: Number of bytes added to user buffer
+ */
+static ssize_t debugfs_protected_debug_mode_read(struct file *file,
+ char __user *buf, size_t len, loff_t *ppos)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
+ u32 gpu_status;
+ ssize_t ret_val;
+
+ kbase_pm_context_active(kbdev);
+ gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL);
+ kbase_pm_context_idle(kbdev);
+
+ if (gpu_status & GPU_DBGEN)
+ ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
+ else
+ ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
+
+ return ret_val;
+}
+
+/*
+ * fops_protected_debug_mode - "protected_debug_mode" debugfs fops
+ *
+ * Contains the file operations for the "protected_debug_mode" debugfs file
+ */
+static const struct file_operations fops_protected_debug_mode = {
+ .open = simple_open,
+ .read = debugfs_protected_debug_mode_read,
+ .llseek = default_llseek,
+};
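+
+/*
+ * Illustrative read of the protected_debug_mode debugfs file (path assumes
+ * debugfs mounted at /sys/kernel/debug and a device named mali0):
+ *
+ *   # cat /sys/kernel/debug/mali0/protected_debug_mode
+ *   0
+ *
+ * "1" means GPU_STATUS reports GPU_DBGEN set (protected debug mode enabled),
+ * "0" means it is disabled. The file is only created when the GPU has the
+ * PROTECTED_DEBUG_MODE feature.
+ */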
+
+static int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+ struct dentry *debugfs_ctx_defaults_directory;
+ int err;
+
+ kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
+ NULL);
+ if (!kbdev->mali_debugfs_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
+ kbdev->mali_debugfs_directory);
+ if (!kbdev->debugfs_ctx_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
+ kbdev->debugfs_ctx_directory);
+ if (!debugfs_ctx_defaults_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+#if !MALI_CUSTOMER_RELEASE
+ kbasep_regs_dump_debugfs_init(kbdev);
+#endif /* !MALI_CUSTOMER_RELEASE */
+ kbasep_regs_history_debugfs_init(kbdev);
+
+ kbase_debug_job_fault_debugfs_init(kbdev);
+ kbasep_gpu_memory_debugfs_init(kbdev);
+ kbase_as_fault_debugfs_init(kbdev);
+#if KBASE_GPU_RESET_EN
+ /* fops_* variables created by invocations of macro
+ * MAKE_QUIRK_ACCESSORS() above. */
+ debugfs_create_file("quirks_sc", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_sc_quirks);
+ debugfs_create_file("quirks_tiler", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_tiler_quirks);
+ debugfs_create_file("quirks_mmu", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_mmu_quirks);
+ debugfs_create_file("quirks_jm", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_jm_quirks);
+#endif /* KBASE_GPU_RESET_EN */
+
+ debugfs_create_bool("infinite_cache", 0644,
+ debugfs_ctx_defaults_directory,
+ &kbdev->infinite_cache_active_default);
+
+ debugfs_create_size_t("mem_pool_max_size", 0644,
+ debugfs_ctx_defaults_directory,
+ &kbdev->mem_pool_max_size_default);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+ debugfs_create_file("protected_debug_mode", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_protected_debug_mode);
+ }
+
+#if KBASE_TRACE_ENABLE
+ kbasep_trace_debugfs_init(kbdev);
+#endif /* KBASE_TRACE_ENABLE */
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kbasep_trace_timeline_debugfs_init(kbdev);
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#ifdef CONFIG_DEVFREQ_THERMAL
+ if (kbdev->inited_subsys & inited_devfreq)
+ kbase_ipa_debugfs_init(kbdev);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR,
+ kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_serialize_jobs_debugfs_fops);
+#endif /* CONFIG_DEBUG_FS */
+
+ return 0;
+
+out:
+ debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+ return err;
+}
+
+static void kbase_device_debugfs_term(struct kbase_device *kbdev)
+{
+ debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+ return 0;
+}
+
+static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static void kbase_device_coherency_init(struct kbase_device *kbdev,
+ unsigned prod_id)
+{
+#ifdef CONFIG_OF
+ u32 supported_coherency_bitmap =
+ kbdev->gpu_props.props.raw_props.coherency_mode;
+ const void *coherency_override_dts;
+ u32 override_coherency;
+
+ /* Only for tMIx :
+ * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+ * documented for tMIx so force correct value here.
+ */
+ if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
+ (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+ GPU_ID2_PRODUCT_TMIX))
+ if (supported_coherency_bitmap ==
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE))
+ supported_coherency_bitmap |=
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+
+#endif /* CONFIG_OF */
+
+ kbdev->system_coherency = COHERENCY_NONE;
+
+ /* device tree may override the coherency */
+#ifdef CONFIG_OF
+ coherency_override_dts = of_get_property(kbdev->dev->of_node,
+ "system-coherency",
+ NULL);
+ if (coherency_override_dts) {
+
+ override_coherency = be32_to_cpup(coherency_override_dts);
+
+ if ((override_coherency <= COHERENCY_NONE) &&
+ (supported_coherency_bitmap &
+ COHERENCY_FEATURE_BIT(override_coherency))) {
+
+ kbdev->system_coherency = override_coherency;
+
+ dev_info(kbdev->dev,
+ "Using coherency mode %u set from dtb",
+ override_coherency);
+ } else
+ dev_warn(kbdev->dev,
+ "Ignoring unsupported coherency mode %u set from dtb",
+ override_coherency);
+ }
+
+#endif /* CONFIG_OF */
+
+ kbdev->gpu_props.props.raw_props.coherency_mode =
+ kbdev->system_coherency;
+}
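+
+/*
+ * Illustrative device-tree fragment for the override read above (node name
+ * and address are placeholders; the value must be a coherency mode the GPU
+ * reports as supported):
+ *
+ *   gpu@e82c0000 {
+ *           compatible = "arm,mali-midgard";
+ *           system-coherency = <31>;   // 31 = none, 1 = ACE, 0 = ACE-Lite
+ *   };
+ */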
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+
+/* Callback used by the kbase bus logger client, to initiate a GPU reset
+ * when the bus log is restarted. GPU reset is used as reference point
+ * in HW bus log analyses.
+ */
+static void kbase_logging_started_cb(void *data)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)data;
+
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
+}
+#endif
+
+static struct attribute *kbase_attrs[] = {
+#ifdef CONFIG_MALI_DEBUG
+ &dev_attr_debug_command.attr,
+ &dev_attr_js_softstop_always.attr,
+#endif
+#if !MALI_CUSTOMER_RELEASE
+ &dev_attr_force_replay.attr,
+#endif
+ &dev_attr_js_timeouts.attr,
+ &dev_attr_soft_job_timeout.attr,
+ &dev_attr_gpuinfo.attr,
+ &dev_attr_dvfs_period.attr,
+ &dev_attr_pm_poweroff.attr,
+ &dev_attr_reset_timeout.attr,
+ &dev_attr_js_scheduling_period.attr,
+ &dev_attr_power_policy.attr,
+ &dev_attr_core_availability_policy.attr,
+ &dev_attr_core_mask.attr,
+ &dev_attr_mem_pool_size.attr,
+ &dev_attr_mem_pool_max_size.attr,
+ NULL
+};
+
+static const struct attribute_group kbase_attr_group = {
+ .attrs = kbase_attrs,
+};
+
+static int kbase_platform_device_remove(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ const struct list_head *dev_list;
+
+ if (!kbdev)
+ return -ENODEV;
+
+ kfree(kbdev->gpu_props.prop_buffer);
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ if (kbdev->inited_subsys & inited_buslogger) {
+ bl_core_client_unregister(kbdev->buslogger);
+ kbdev->inited_subsys &= ~inited_buslogger;
+ }
+#endif
+
+
+ if (kbdev->inited_subsys & inited_dev_list) {
+ dev_list = kbase_dev_list_get();
+ list_del(&kbdev->entry);
+ kbase_dev_list_put(dev_list);
+ kbdev->inited_subsys &= ~inited_dev_list;
+ }
+
+ if (kbdev->inited_subsys & inited_misc_register) {
+ misc_deregister(&kbdev->mdev);
+ kbdev->inited_subsys &= ~inited_misc_register;
+ }
+
+ if (kbdev->inited_subsys & inited_sysfs_group) {
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
+ kbdev->inited_subsys &= ~inited_sysfs_group;
+ }
+
+ if (kbdev->inited_subsys & inited_get_device) {
+ put_device(kbdev->dev);
+ kbdev->inited_subsys &= ~inited_get_device;
+ }
+
+ if (kbdev->inited_subsys & inited_debugfs) {
+ kbase_device_debugfs_term(kbdev);
+ kbdev->inited_subsys &= ~inited_debugfs;
+ }
+
+ if (kbdev->inited_subsys & inited_job_fault) {
+ kbase_debug_job_fault_dev_term(kbdev);
+ kbdev->inited_subsys &= ~inited_job_fault;
+ }
+ if (kbdev->inited_subsys & inited_vinstr) {
+ kbase_vinstr_term(kbdev->vinstr_ctx);
+ kbdev->inited_subsys &= ~inited_vinstr;
+ }
+
+#ifdef CONFIG_MALI_DEVFREQ
+ if (kbdev->inited_subsys & inited_devfreq) {
+ kbase_devfreq_term(kbdev);
+ kbdev->inited_subsys &= ~inited_devfreq;
+ }
+#endif
+
+ if (kbdev->inited_subsys & inited_backend_late) {
+ kbase_backend_late_term(kbdev);
+ kbdev->inited_subsys &= ~inited_backend_late;
+ }
+
+ if (kbdev->inited_subsys & inited_tlstream) {
+ kbase_tlstream_term();
+ kbdev->inited_subsys &= ~inited_tlstream;
+ }
+
+ /* Bring job and mem sys to a halt before we continue termination */
+
+ if (kbdev->inited_subsys & inited_js)
+ kbasep_js_devdata_halt(kbdev);
+
+ if (kbdev->inited_subsys & inited_mem)
+ kbase_mem_halt(kbdev);
+
+ if (kbdev->inited_subsys & inited_protected) {
+ kbasep_protected_mode_term(kbdev);
+ kbdev->inited_subsys &= ~inited_protected;
+ }
+
+ if (kbdev->inited_subsys & inited_js) {
+ kbasep_js_devdata_term(kbdev);
+ kbdev->inited_subsys &= ~inited_js;
+ }
+
+ if (kbdev->inited_subsys & inited_mem) {
+ kbase_mem_term(kbdev);
+ kbdev->inited_subsys &= ~inited_mem;
+ }
+
+ if (kbdev->inited_subsys & inited_pm_runtime_init) {
+ kbdev->pm.callback_power_runtime_term(kbdev);
+ kbdev->inited_subsys &= ~inited_pm_runtime_init;
+ }
+
+ if (kbdev->inited_subsys & inited_ctx_sched) {
+ kbase_ctx_sched_term(kbdev);
+ kbdev->inited_subsys &= ~inited_ctx_sched;
+ }
+
+ if (kbdev->inited_subsys & inited_device) {
+ kbase_device_term(kbdev);
+ kbdev->inited_subsys &= ~inited_device;
+ }
+
+ if (kbdev->inited_subsys & inited_backend_early) {
+ kbase_backend_early_term(kbdev);
+ kbdev->inited_subsys &= ~inited_backend_early;
+ }
+
+ if (kbdev->inited_subsys & inited_io_history) {
+ kbase_io_history_term(&kbdev->io_history);
+ kbdev->inited_subsys &= ~inited_io_history;
+ }
+
+ if (kbdev->inited_subsys & inited_power_control) {
+ power_control_term(kbdev);
+ kbdev->inited_subsys &= ~inited_power_control;
+ }
+
+ if (kbdev->inited_subsys & inited_registers_map) {
+ registers_unmap(kbdev);
+ kbdev->inited_subsys &= ~inited_registers_map;
+ }
+
+#ifdef CONFIG_MALI_NO_MALI
+ if (kbdev->inited_subsys & inited_gpu_device) {
+ gpu_device_destroy(kbdev);
+ kbdev->inited_subsys &= ~inited_gpu_device;
+ }
+#endif /* CONFIG_MALI_NO_MALI */
+
+ if (kbdev->inited_subsys != 0)
+ dev_err(kbdev->dev, "Missing sub system termination\n");
+
+ kbase_device_free(kbdev);
+
+ return 0;
+}
+
+
+/* Number of register accesses for the buffer that we allocate during
+ * initialization time. The buffer size can be changed later via debugfs. */
+#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
+
+static int kbase_platform_device_probe(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev;
+ struct mali_base_gpu_core_props *core_props;
+ u32 gpu_id;
+ unsigned prod_id;
+ const struct list_head *dev_list;
+ int err = 0;
+
+#ifdef CONFIG_OF
+ err = kbase_platform_early_init();
+ if (err) {
+ dev_err(&pdev->dev, "Early platform initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+#endif
+ kbdev = kbase_device_alloc();
+ if (!kbdev) {
+ dev_err(&pdev->dev, "Allocate device failed\n");
+ kbase_platform_device_remove(pdev);
+ return -ENOMEM;
+ }
+
+ kbdev->dev = &pdev->dev;
+ dev_set_drvdata(kbdev->dev, kbdev);
+
+#ifdef CONFIG_MALI_NO_MALI
+ err = gpu_device_create(kbdev);
+ if (err) {
+ dev_err(&pdev->dev, "Dummy model initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_gpu_device;
+#endif /* CONFIG_MALI_NO_MALI */
+
+ err = assign_irqs(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ search failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+
+ err = registers_map(kbdev);
+ if (err) {
+ dev_err(&pdev->dev, "Register map failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_registers_map;
+
+ err = power_control_init(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Power control initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_power_control;
+
+ err = kbase_io_history_init(&kbdev->io_history,
+ KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "Register access history initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return -ENOMEM;
+ }
+ kbdev->inited_subsys |= inited_io_history;
+
+ err = kbase_backend_early_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Early backend initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_backend_early;
+
+ scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
+ kbase_dev_nr);
+
+ kbase_disjoint_init(kbdev);
+
+ /* obtain min/max configured gpu frequencies */
+ core_props = &(kbdev->gpu_props.props.core_props);
+ core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
+ core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+
+ err = kbase_device_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_device;
+
+ err = kbase_ctx_sched_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n",
+ err);
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_ctx_sched;
+
+ if (kbdev->pm.callback_power_runtime_init) {
+ err = kbdev->pm.callback_power_runtime_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Runtime PM initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_pm_runtime_init;
+ }
+
+ err = kbase_mem_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_mem;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+ prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ kbase_device_coherency_init(kbdev, prod_id);
+
+ err = kbasep_protected_mode_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_protected;
+
+ dev_list = kbase_dev_list_get();
+ list_add(&kbdev->entry, &kbase_dev_list);
+ kbase_dev_list_put(dev_list);
+ kbdev->inited_subsys |= inited_dev_list;
+
+ err = kbasep_js_devdata_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_js;
+
+ err = kbase_tlstream_init();
+ if (err) {
+ dev_err(kbdev->dev, "Timeline stream initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_tlstream;
+
+ err = kbase_backend_late_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Late backend initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_backend_late;
+
+ /* Initialize the kctx list. This is used by vinstr. */
+ mutex_init(&kbdev->kctx_list_lock);
+ INIT_LIST_HEAD(&kbdev->kctx_list);
+
+ kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
+ if (!kbdev->vinstr_ctx) {
+ dev_err(kbdev->dev,
+ "Virtual instrumentation initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return -EINVAL;
+ }
+ kbdev->inited_subsys |= inited_vinstr;
+
+#ifdef CONFIG_MALI_DEVFREQ
+ /* Devfreq uses vinstr, so must be initialized after it. */
+ err = kbase_devfreq_init(kbdev);
+ if (!err)
+ kbdev->inited_subsys |= inited_devfreq;
+ else
+ dev_err(kbdev->dev, "Continuing without devfreq\n");
+#endif /* CONFIG_MALI_DEVFREQ */
+
+ err = kbase_debug_job_fault_dev_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Job fault debug initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_job_fault;
+
+ err = kbase_device_debugfs_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "DebugFS initialization failed");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_debugfs;
+
+ kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
+ kbdev->mdev.name = kbdev->devname;
+ kbdev->mdev.fops = &kbase_fops;
+ kbdev->mdev.parent = get_device(kbdev->dev);
+ kbdev->inited_subsys |= inited_get_device;
+
+ /* This needs to happen before registering the device with misc_register(),
+ * otherwise it causes a race condition between registering the device and a
+ * uevent event being generated for userspace, causing udev rules to run
+ * which might expect certain sysfs attributes to be present. Without this
+ * ordering, some Mali sysfs entries could appear to udev not to exist.
+ *
+ * For more information, see
+ * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
+ * paragraph that starts with "Word of warning", currently the second-last
+ * paragraph.
+ */
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
+ if (err) {
+ dev_err(&pdev->dev, "SysFS group creation failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_sysfs_group;
+
+ err = misc_register(&kbdev->mdev);
+ if (err) {
+ dev_err(kbdev->dev, "Misc device registration failed for %s\n",
+ kbdev->devname);
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_misc_register;
+
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ err = bl_core_client_register(kbdev->devname,
+ kbase_logging_started_cb,
+ kbdev, &kbdev->buslogger,
+ THIS_MODULE, NULL);
+ if (err == 0) {
+ kbdev->inited_subsys |= inited_buslogger;
+ bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+ } else {
+ dev_warn(kbdev->dev, "Bus log client registration failed\n");
+ err = 0;
+ }
+#endif
+
+ err = kbase_gpuprops_populate_user_buffer(kbdev);
+ if (err) {
+ dev_err(&pdev->dev, "GPU property population failed");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+
+ dev_info(kbdev->dev,
+ "Probed as %s\n", dev_name(kbdev->mdev.this_device));
+
+ kbase_dev_nr++;
+
+ return err;
+}
+
+#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
+
+/**
+ * kbase_device_suspend - Suspend callback from the OS.
+ *
+ * This is called by Linux when the device should suspend.
+ *
+ * @dev: The device to suspend
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_suspend(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (kbdev->inited_subsys & inited_devfreq)
+ devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+ kbase_pm_suspend(kbdev);
+ return 0;
+}
+
+/**
+ * kbase_device_resume - Resume callback from the OS.
+ *
+ * This is called by Linux when the device should resume from suspension.
+ *
+ * @dev: The device to resume
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_resume(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ kbase_pm_resume(kbdev);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (kbdev->inited_subsys & inited_devfreq)
+ devfreq_resume_device(kbdev->devfreq);
+#endif
+ return 0;
+}
+
+/**
+ * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
+ *
+ * This is called by Linux when the device should prepare for a condition in
+ * which it will not be able to communicate with the CPU(s) and RAM due to
+ * power management.
+ *
+ * @dev: The device to suspend
+ *
+ * Return: A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_suspend(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (kbdev->inited_subsys & inited_devfreq)
+ devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+ if (kbdev->pm.backend.callback_power_runtime_off) {
+ kbdev->pm.backend.callback_power_runtime_off(kbdev);
+ dev_dbg(dev, "runtime suspend\n");
+ }
+ return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/**
+ * kbase_device_runtime_resume - Runtime resume callback from the OS.
+ *
+ * This is called by Linux when the device should go into a fully active state.
+ *
+ * @dev: The device to suspend
+ *
+ * Return: A standard Linux error code
+ */
+
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ if (kbdev->pm.backend.callback_power_runtime_on) {
+ ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
+ dev_dbg(dev, "runtime resume\n");
+ }
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (kbdev->inited_subsys & inited_devfreq)
+ devfreq_resume_device(kbdev->devfreq);
+#endif
+
+ return ret;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_device_runtime_idle - Runtime idle callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device appears to be inactive and it might
+ * be placed into a low power state.
+ *
+ * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
+ * otherwise a standard Linux error code
+ */
+static int kbase_device_runtime_idle(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ /* Use platform specific implementation if it exists. */
+ if (kbdev->pm.backend.callback_power_runtime_idle)
+ return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
+
+ return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/* The power management operations for the platform driver.
+ */
+static const struct dev_pm_ops kbase_pm_ops = {
+ .suspend = kbase_device_suspend,
+ .resume = kbase_device_resume,
+#ifdef KBASE_PM_RUNTIME
+ .runtime_suspend = kbase_device_runtime_suspend,
+ .runtime_resume = kbase_device_runtime_resume,
+ .runtime_idle = kbase_device_runtime_idle,
+#endif /* KBASE_PM_RUNTIME */
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id kbase_dt_ids[] = {
+ { .compatible = "arm,malit6xx" },
+ { .compatible = "arm,mali-midgard" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kbase_dt_ids);
+#endif
+
+static struct platform_driver kbase_platform_driver = {
+ .probe = kbase_platform_device_probe,
+ .remove = kbase_platform_device_remove,
+ .driver = {
+ .name = kbase_drv_name,
+ .owner = THIS_MODULE,
+ .pm = &kbase_pm_ops,
+ .of_match_table = of_match_ptr(kbase_dt_ids),
+ },
+};
+
+/*
+ * When using Device Tree, the driver no longer provides a shortcut for
+ * creating the Mali platform device; the device is instantiated from the
+ * matching DT node instead.
+ */
+#ifdef CONFIG_OF
+module_platform_driver(kbase_platform_driver);
+#else
+
+static int __init kbase_driver_init(void)
+{
+ int ret;
+
+ ret = kbase_platform_early_init();
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ ret = kbase_platform_fake_register();
+ if (ret)
+ return ret;
+#endif
+ ret = platform_driver_register(&kbase_platform_driver);
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ if (ret)
+ kbase_platform_fake_unregister();
+#endif
+ return ret;
+}
+
+static void __exit kbase_driver_exit(void)
+{
+ platform_driver_unregister(&kbase_platform_driver);
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ kbase_platform_fake_unregister();
+#endif
+}
+
+module_init(kbase_driver_init);
+module_exit(kbase_driver_exit);
+
+#endif /* CONFIG_OF */
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
+ __stringify(BASE_UK_VERSION_MAJOR) "." \
+ __stringify(BASE_UK_VERSION_MINOR) ")");
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
+#define CREATE_TRACE_POINTS
+#endif
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+/* Create the trace points (otherwise we just get code to call a tracepoint) */
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
+
+void kbase_trace_mali_pm_status(u32 event, u64 value)
+{
+ trace_mali_pm_status(event, value);
+}
+
+void kbase_trace_mali_pm_power_off(u32 event, u64 value)
+{
+ trace_mali_pm_power_off(event, value);
+}
+
+void kbase_trace_mali_pm_power_on(u32 event, u64 value)
+{
+ trace_mali_pm_power_on(event, value);
+}
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
+{
+ trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
+}
+
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
+{
+ trace_mali_page_fault_insert_pages(event, value);
+}
+
+void kbase_trace_mali_mmu_as_in_use(int event)
+{
+ trace_mali_mmu_as_in_use(event);
+}
+
+void kbase_trace_mali_mmu_as_released(int event)
+{
+ trace_mali_mmu_as_released(event);
+}
+
+void kbase_trace_mali_total_alloc_pages_change(long long int event)
+{
+ trace_mali_total_alloc_pages_change(event);
+}
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+#include "mali_linux_kbase_trace.h"
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.c b/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.c
new file mode 100644
index 000000000000..e2f7baabad43
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.c
@@ -0,0 +1,203 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_ctx_sched.h"
+
+int kbase_ctx_sched_init(struct kbase_device *kbdev)
+{
+ int as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
+
+ /* These two must be recalculated if nr_hw_address_spaces changes
+ * (e.g. for HW workarounds) */
+ kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ bool use_workaround;
+
+ use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
+ if (use_workaround) {
+ dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+ kbdev->nr_user_address_spaces = 1;
+ }
+ }
+
+ kbdev->as_free = as_present; /* All ASs initially free */
+
+ memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx));
+
+ return 0;
+}
+
+void kbase_ctx_sched_term(struct kbase_device *kbdev)
+{
+ s8 i;
+
+ /* Sanity checks */
+ for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+ WARN_ON(kbdev->as_to_kctx[i] != NULL);
+ WARN_ON(!(kbdev->as_free & (1u << i)));
+ }
+}
+
+/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
+ *
+ * @kctx: The context for which to find a free address space
+ *
+ * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID
+ *
+ * This function returns an address space available for use. It prefers an AS
+ * that was previously assigned to the context, to avoid having to reprogram
+ * the MMU.
+ */
+static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+ int free_as;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* First check if the previously assigned AS is available */
+ if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
+ (kbdev->as_free & (1u << kctx->as_nr)))
+ return kctx->as_nr;
+
+ /* The previously assigned AS was taken, we'll be returning any free
+ * AS at this point.
+ */
+ free_as = ffs(kbdev->as_free) - 1;
+ if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces)
+ return free_as;
+
+ return KBASEP_AS_NR_INVALID;
+}
+
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+ if (atomic_inc_return(&kctx->refcount) == 1) {
+ int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);
+
+ if (free_as != KBASEP_AS_NR_INVALID) {
+ kbdev->as_free &= ~(1u << free_as);
+ /* Only program the MMU if the context has not been
+ * assigned the same address space before.
+ */
+ if (free_as != kctx->as_nr) {
+ struct kbase_context *const prev_kctx =
+ kbdev->as_to_kctx[free_as];
+
+ if (prev_kctx) {
+ WARN_ON(atomic_read(&prev_kctx->refcount) != 0);
+ kbase_mmu_disable(prev_kctx);
+ prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
+ }
+
+ kctx->as_nr = free_as;
+ kbdev->as_to_kctx[free_as] = kctx;
+ kbase_mmu_update(kctx);
+ }
+ } else {
+ atomic_dec(&kctx->refcount);
+
+ /* Failed to find an available address space, we must
+ * be returning an error at this point.
+ */
+ WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ }
+ }
+
+ return kctx->as_nr;
+}
+
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ WARN_ON(atomic_read(&kctx->refcount) == 0);
+ WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
+ WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+
+ atomic_inc(&kctx->refcount);
+}
+
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (atomic_dec_return(&kctx->refcount) == 0)
+ kbdev->as_free |= (1u << kctx->as_nr);
+}
+
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(atomic_read(&kctx->refcount) != 0);
+
+ if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
+ if (kbdev->pm.backend.gpu_powered)
+ kbase_mmu_disable(kctx);
+
+ kbdev->as_to_kctx[kctx->as_nr] = NULL;
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+ }
+}
+
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
+{
+ s8 i;
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+ for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+ struct kbase_context *kctx;
+
+ kctx = kbdev->as_to_kctx[i];
+ if (kctx) {
+ if (atomic_read(&kctx->refcount)) {
+ WARN_ON(kctx->as_nr != i);
+
+ kbase_mmu_update(kctx);
+ } else {
+ /* This context might have been assigned an
+ * AS before, clear it.
+ */
+ kbdev->as_to_kctx[kctx->as_nr] = NULL;
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+ }
+ } else {
+ kbase_mmu_disable_as(kbdev, i);
+ }
+ }
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.h b/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.h
new file mode 100644
index 000000000000..e55152547c31
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_ctx_sched.h
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_CTX_SCHED_H_
+#define _KBASE_CTX_SCHED_H_
+
+#include <mali_kbase.h>
+
+/* The Context Scheduler manages address space assignment and reference
+ * counting to kbase_context. The interface has been designed to minimise
+ * interactions between the Job Scheduler and Power Management/MMU to support
+ * both the existing Job Scheduler and Command Stream Frontend interface.
+ *
+ * The initial implementation of the Context Scheduler does not schedule
+ * contexts. Instead it relies on the Job Scheduler/CSF to make decisions of
+ * when to schedule/evict contexts if address spaces are starved. In the
+ * future, once an interface between the CS and JS/CSF has been devised to
+ * provide enough information about how each context is consuming GPU resources,
+ * those decisions can be made in the CS itself, thereby reducing duplicated
+ * code.
+ */
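+
+/*
+ * Illustrative usage sketch (not a complete submission path; it only shows
+ * the locking that the functions below document):
+ *
+ *   mutex_lock(&kbdev->mmu_hw_mutex);
+ *   spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *   as_nr = kbase_ctx_sched_retain_ctx(kctx);
+ *   spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *   mutex_unlock(&kbdev->mmu_hw_mutex);
+ *
+ *   if (as_nr != KBASEP_AS_NR_INVALID) {
+ *           run GPU work that uses address space as_nr, then:
+ *           spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *           kbase_ctx_sched_release_ctx(kctx);
+ *           spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *   }
+ */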
+
+/* kbase_ctx_sched_init - Initialise the context scheduler
+ *
+ * @kbdev: The device for which the context scheduler needs to be
+ * initialised
+ *
+ * Return: 0 for success, otherwise failure
+ *
+ * This must be called during device initialisation. The number of hardware
+ * address spaces must already be established before calling this function.
+ */
+int kbase_ctx_sched_init(struct kbase_device *kbdev);
+
+/* kbase_ctx_sched_term - Terminate the context scheduler
+ *
+ * @kbdev: The device for which the context scheduler needs to be
+ * terminated
+ *
+ * This must be called during device termination after all contexts have been
+ * destroyed.
+ */
+void kbase_ctx_sched_term(struct kbase_device *kbdev);
+
+/* kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
+ *
+ * @kctx: The context to which to retain a reference
+ *
+ * Return: The address space that the context has been assigned to or
+ * KBASEP_AS_NR_INVALID if no address space was available.
+ *
+ * This function should be called whenever an address space should be assigned
+ * to a context and programmed onto the MMU. It should typically be called
+ * when jobs are ready to be submitted to the GPU.
+ *
+ * It can be called as many times as necessary. The address space will be
+ * assigned to the context for as long as there is a reference to said context.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
+
+/* kbase_ctx_sched_retain_ctx_refcount - Retain an existing context reference
+ *
+ * @kctx: The context to which to retain a reference
+ *
+ * This function only retains a reference to the context. It must be called
+ * only when the context already has a reference.
+ *
+ * This is typically called inside an atomic session where we know the context
+ * is already scheduled in but want to take an extra reference to ensure that
+ * it doesn't get descheduled.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
+
+/* kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
+ *
+ * @kctx: The context from which to release a reference
+ *
+ * This function should be called whenever an address space could be unassigned
+ * from a context. When there are no more references to said context, the
+ * address space previously assigned to this context shall be reassigned to
+ * other contexts as needed.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
+
+/* kbase_ctx_sched_remove_ctx - Unassign previously assigned address space
+ *
+ * @kctx: The context to be removed
+ *
+ * This function should be called when a context is being destroyed. The
+ * context must no longer have any reference. If it has been assigned an
+ * address space before then the AS will be unprogrammed.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
+
+/* kbase_ctx_sched_restore_all_as - Reprogram all address spaces
+ *
+ * @kbdev: The device for which address spaces are to be reprogrammed
+ *
+ * This function shall reprogram all address spaces previously assigned to
+ * contexts. It can be used after the GPU is reset.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev);
+
+#endif /* _KBASE_CTX_SCHED_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug.c b/drivers/gpu/arm_gpu/mali_kbase_debug.c
new file mode 100644
index 000000000000..fb57ac2e31ad
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+ NULL,
+ NULL
+};
+
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param)
+{
+ kbasep_debug_assert_registered_cb.func = func;
+ kbasep_debug_assert_registered_cb.param = param;
+}
+
+void kbasep_debug_assert_call_hook(void)
+{
+ if (kbasep_debug_assert_registered_cb.func != NULL)
+ kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param);
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook);
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug.h b/drivers/gpu/arm_gpu/mali_kbase_debug.h
new file mode 100644
index 000000000000..5fff2892bb55
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug.h
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_DEBUG_H
+#define _KBASE_DEBUG_H
+
+#include <linux/bug.h>
+
+/** @brief If equals to 0, a trace containing the file, line, and function will be displayed before each message. */
+#define KBASE_DEBUG_SKIP_TRACE 0
+
+/** @brief If different from 0, the trace will only contain the file and line. */
+#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0
+
+/** @brief Disable the asserts tests if set to 1. Default is to disable the asserts in release. */
+#ifndef KBASE_DEBUG_DISABLE_ASSERTS
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_DISABLE_ASSERTS 0
+#else
+#define KBASE_DEBUG_DISABLE_ASSERTS 1
+#endif
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/** Function type that is called on an KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */
+typedef void (kbase_debug_assert_hook) (void *);
+
+struct kbasep_debug_assert_cb {
+ kbase_debug_assert_hook *func;
+ void *param;
+};
+
+/**
+ * @def KBASEP_DEBUG_PRINT_TRACE
+ * @brief Private macro containing the format of the trace to display before every message
+ * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME
+ */
+#if !KBASE_DEBUG_SKIP_TRACE
+#define KBASEP_DEBUG_PRINT_TRACE \
+ "In file: " __FILE__ " line: " CSTD_STR2(__LINE__)
+#if !KBASE_DEBUG_SKIP_FUNCTION_NAME
+#define KBASEP_DEBUG_PRINT_FUNCTION __func__
+#else
+#define KBASEP_DEBUG_PRINT_FUNCTION ""
+#endif
+#else
+#define KBASEP_DEBUG_PRINT_TRACE ""
+#endif
+
+/**
+ * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)
+ * @brief (Private) system printing function associated to the @see KBASE_DEBUG_ASSERT_MSG event.
+ * @param trace location in the code from where the message is printed
+ * @param function function from where the message is printed
+ * @param ... Format string followed by format arguments.
+ * @note function parameter cannot be concatenated with other strings
+ */
+/* Select the correct system output function*/
+#ifdef CONFIG_MALI_DEBUG
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\
+ do { \
+ pr_err("Mali<ASSERT>: %s function:%s ", trace, function);\
+ pr_err(__VA_ARGS__);\
+ pr_err("\n");\
+ } while (false)
+#else
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) CSTD_NOP()
+#endif
+
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook()
+#else
+#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP()
+#endif
+
+/**
+ * @def KBASE_DEBUG_ASSERT(expr)
+ * @brief Calls @see KBASE_PRINT_ASSERT and prints the expression @a expr if @a expr is false
+ *
+ * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ */
+#define KBASE_DEBUG_ASSERT(expr) \
+ KBASE_DEBUG_ASSERT_MSG(expr, #expr)
+
+#if KBASE_DEBUG_DISABLE_ASSERTS
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP()
+#else
+ /**
+ * @def KBASE_DEBUG_ASSERT_MSG(expr, ...)
+ * @brief Calls @see KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false
+ *
+ * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ * @param ... Message to display when @a expr is false, as a format string followed by format arguments.
+ */
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \
+ do { \
+ if (!(expr)) { \
+ KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\
+ KBASE_CALL_ASSERT_HOOK();\
+ BUG();\
+ } \
+ } while (false)
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/**
+ * @def KBASE_DEBUG_CODE( X )
+ * @brief Executes the code inside the macro only in debug mode
+ *
+ * @param X Code to compile only in debug mode.
+ */
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_CODE(X) X
+#else
+#define KBASE_DEBUG_CODE(X) CSTD_NOP()
+#endif /* CONFIG_MALI_DEBUG */
+
+/** @} */
+
+/**
+ * @brief Register a function to call on ASSERT
+ *
+ * Such functions will \b only be called during Debug mode, and for debugging
+ * features \b only. Do not rely on them to be called in general use.
+ *
+ * To disable the hook, supply NULL to \a func.
+ *
+ * @note This function is not thread-safe, and should only be used to
+ * register/deregister once in the module's lifetime.
+ *
+ * @param[in] func the function to call when an assert is triggered.
+ * @param[in] param the parameter to pass to \a func when calling it
+ */
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param);
+
+/**
+ * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook()
+ *
+ * @note This function is not thread-safe with respect to multiple threads
+ * registering functions and parameters with
+ * kbase_debug_assert_register_hook(). Otherwise, thread safety is the
+ * responsibility of the registered hook.
+ */
+void kbasep_debug_assert_call_hook(void);
+
+#endif /* _KBASE_DEBUG_H */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.c b/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.c
new file mode 100644
index 000000000000..f29430ddf8f9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.c
@@ -0,0 +1,499 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool kbase_is_job_fault_event_pending(struct kbase_device *kbdev)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ ret = !list_empty(event_list);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ return ret;
+}
+
+static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+ struct base_job_fault_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (list_empty(event_list)) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ return true;
+ }
+ list_for_each_entry(event, event_list, head) {
+ if (event->katom->kctx == kctx) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock,
+ flags);
+ return false;
+ }
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ return true;
+}
+
+/* Wait until a fault happens and copy the event */
+static int kbase_job_fault_event_wait(struct kbase_device *kbdev,
+ struct base_job_fault_event *event)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ struct base_job_fault_event *event_in;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (list_empty(event_list)) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ if (wait_event_interruptible(kbdev->job_fault_wq,
+ kbase_is_job_fault_event_pending(kbdev)))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ }
+
+ event_in = list_entry(event_list->next,
+ struct base_job_fault_event, head);
+ event->event_code = event_in->event_code;
+ event->katom = event_in->katom;
+
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ return 0;
+
+}
+
+/* remove the event from the queue */
+static struct base_job_fault_event *kbase_job_fault_event_dequeue(
+ struct kbase_device *kbdev, struct list_head *event_list)
+{
+ struct base_job_fault_event *event;
+
+ event = list_entry(event_list->next,
+ struct base_job_fault_event, head);
+ list_del(event_list->next);
+
+ return event;
+
+}
+
+/* Remove all the atoms queued after the failed atom in the same context,
+ * and call the postponed bottom half of job done for each of them.
+ * After that, this context can be rescheduled.
+ */
+static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
+{
+ struct list_head *event_list = &kctx->job_fault_resume_event_list;
+
+ while (!list_empty(event_list)) {
+ struct base_job_fault_event *event;
+
+ event = kbase_job_fault_event_dequeue(kctx->kbdev,
+ &kctx->job_fault_resume_event_list);
+ kbase_jd_done_worker(&event->katom->work);
+ }
+
+}
+
+/* Remove all the failed atoms, which may belong to different contexts,
+ * and resume all the contexts that were suspended because of a failed job.
+ */
+static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ while (!list_empty(event_list)) {
+ kbase_job_fault_event_dequeue(kbdev, event_list);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ wake_up(&kbdev->job_fault_resume_wq);
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+}
+
+static void kbase_job_fault_resume_worker(struct work_struct *data)
+{
+ struct base_job_fault_event *event = container_of(data,
+ struct base_job_fault_event, job_fault_work);
+ struct kbase_context *kctx;
+ struct kbase_jd_atom *katom;
+
+ katom = event->katom;
+ kctx = katom->kctx;
+
+ dev_info(kctx->kbdev->dev, "Job dumping wait\n");
+
+ /* When woken up, check whether the queue is empty or the failed
+ * atoms left in it belong to a different context. Either case
+ * means the failed job has been dumped, so the wait can finish.
+ * Note that job_fault_event_list should never contain two atoms
+ * that belong to the same context.
+ */
+ wait_event(kctx->kbdev->job_fault_resume_wq,
+ kbase_ctx_has_no_event_pending(kctx));
+
+ atomic_set(&kctx->job_fault_count, 0);
+ kbase_jd_done_worker(&katom->work);
+
+ /* Any atoms scheduled while the failed job was being dumped had
+ * their job_done_worker held back. Rerun it for them now that the
+ * dump has finished.
+ */
+ kbase_job_fault_resume_event_cleanup(kctx);
+
+ dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n");
+}
+
+static struct base_job_fault_event *kbase_job_fault_event_queue(
+ struct list_head *event_list,
+ struct kbase_jd_atom *atom,
+ u32 completion_code)
+{
+ struct base_job_fault_event *event;
+
+ event = &atom->fault_event;
+
+ event->katom = atom;
+ event->event_code = completion_code;
+
+ list_add_tail(&event->head, event_list);
+
+ return event;
+
+}
+
+static void kbase_job_fault_event_post(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, u32 completion_code)
+{
+ struct base_job_fault_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list,
+ katom, completion_code);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ wake_up_interruptible(&kbdev->job_fault_wq);
+
+ INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker);
+ queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work);
+
+ dev_info(katom->kctx->kbdev->dev, "Job fault happen, start dump: %d_%d",
+ katom->kctx->tgid, katom->kctx->id);
+
+}
+
+/*
+ * Process the job fault:
+ * - take a copy of the registers
+ * - send the failed-job dump event
+ * - queue work that waits until the job dump has finished
+ */
+
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+ u32 completion_code)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Check whether a dump is already in progress for this context.
+ * Only one atom per context can be dumped at a time; an atom from
+ * a different context can still be dumped.
+ */
+ if (atomic_read(&kctx->job_fault_count) > 0) {
+ kbase_job_fault_event_queue(
+ &kctx->job_fault_resume_event_list,
+ katom, completion_code);
+ dev_info(kctx->kbdev->dev, "queue:%d\n",
+ kbase_jd_atom_id(kctx, katom));
+ return true;
+ }
+
+ if (kctx->kbdev->job_fault_debug == true) {
+
+ if (completion_code != BASE_JD_EVENT_DONE) {
+
+ if (kbase_job_fault_get_reg_snapshot(kctx) == false) {
+ dev_warn(kctx->kbdev->dev, "get reg dump failed\n");
+ return false;
+ }
+
+ kbase_job_fault_event_post(kctx->kbdev, katom,
+ completion_code);
+ atomic_inc(&kctx->job_fault_count);
+ dev_info(kctx->kbdev->dev, "post:%d\n",
+ kbase_jd_atom_id(kctx, katom));
+ return true;
+
+ }
+ }
+ return false;
+
+}
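A sketch of how a completion path might use this function (illustrative only, not part of this patch; the wrapper below is hypothetical): when kbase_debug_job_fault_process() returns true the normal bottom half is deferred until the dump has finished, otherwise it runs immediately.

    static void hypothetical_complete_atom(struct kbase_jd_atom *katom,
                    u32 completion_code)
    {
            if (kbase_debug_job_fault_process(katom, completion_code))
                    return; /* kbase_jd_done_worker() runs after the dump */

            kbase_jd_done_worker(&katom->work);
    }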
+
+static int debug_job_fault_show(struct seq_file *m, void *v)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+ struct kbase_context *kctx = event->katom->kctx;
+ int i;
+
+ dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d",
+ kctx->tgid, kctx->id, event->reg_offset);
+
+ if (kctx->reg_dump == NULL) {
+ dev_warn(kbdev->dev, "reg dump is NULL");
+ return -1;
+ }
+
+ if (kctx->reg_dump[event->reg_offset] ==
+ REGISTER_DUMP_TERMINATION_FLAG) {
+ /* Return an error here to stop the read, so that the
+ * following next() is not called. stop() can then take
+ * the real event resource and release it.
+ */
+ return -1;
+ }
+
+ if (event->reg_offset == 0)
+ seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id);
+
+ for (i = 0; i < 50; i++) {
+ if (kctx->reg_dump[event->reg_offset] ==
+ REGISTER_DUMP_TERMINATION_FLAG) {
+ break;
+ }
+ seq_printf(m, "%08x: %08x\n",
+ kctx->reg_dump[event->reg_offset],
+ kctx->reg_dump[1+event->reg_offset]);
+ event->reg_offset += 2;
+
+ }
+
+
+ return 0;
+}
+static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+
+ dev_info(kbdev->dev, "debug job fault seq next:%d, %d",
+ event->reg_offset, (int)*pos);
+
+ return event;
+}
+
+static void *debug_job_fault_start(struct seq_file *m, loff_t *pos)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event;
+
+ dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos);
+
+ /* The condition here is tricky. It needs to make sure that
+ * either the fault hasn't happened and dumping hasn't started,
+ * or that dumping has finished.
+ */
+ if (*pos == 0) {
+ event = kmalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return NULL;
+ event->reg_offset = 0;
+ if (kbase_job_fault_event_wait(kbdev, event)) {
+ kfree(event);
+ return NULL;
+ }
+
+ /* The cache flush workaround normally runs in the bottom half
+ * of job done, but that has been delayed. Clean the cache
+ * earlier here so that the GPU memory dump is correct.
+ */
+ kbase_backend_cacheclean(kbdev, event->katom);
+ } else
+ return NULL;
+
+ return event;
+}
+
+static void debug_job_fault_stop(struct seq_file *m, void *v)
+{
+ struct kbase_device *kbdev = m->private;
+
+ /* We wake up kbase_jd_done_worker only after stop: the debug
+ * daemon needs to take the memory dump before the register dump,
+ * otherwise the memory dump may be incorrect.
+ */
+
+ if (v != NULL) {
+ kfree(v);
+ dev_info(kbdev->dev, "debug job fault seq stop stage 1");
+
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (!list_empty(&kbdev->job_fault_event_list)) {
+ kbase_job_fault_event_dequeue(kbdev,
+ &kbdev->job_fault_event_list);
+ wake_up(&kbdev->job_fault_resume_wq);
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ dev_info(kbdev->dev, "debug job fault seq stop stage 2");
+ }
+
+}
+
+static const struct seq_operations ops = {
+ .start = debug_job_fault_start,
+ .next = debug_job_fault_next,
+ .stop = debug_job_fault_stop,
+ .show = debug_job_fault_show,
+};
+
+static int debug_job_fault_open(struct inode *in, struct file *file)
+{
+ struct kbase_device *kbdev = in->i_private;
+
+ seq_open(file, &ops);
+
+ ((struct seq_file *)file->private_data)->private = kbdev;
+ dev_info(kbdev->dev, "debug job fault seq open");
+
+ kbdev->job_fault_debug = true;
+
+ return 0;
+
+}
+
+static int debug_job_fault_release(struct inode *in, struct file *file)
+{
+ struct kbase_device *kbdev = in->i_private;
+
+ seq_release(in, file);
+
+ kbdev->job_fault_debug = false;
+
+ /* Clean up any unprocessed job faults. After that, all the suspended
+ * contexts can be rescheduled.
+ */
+ kbase_job_fault_event_cleanup(kbdev);
+
+ dev_info(kbdev->dev, "debug job fault seq close");
+
+ return 0;
+}
+
+static const struct file_operations kbasep_debug_job_fault_fops = {
+ .open = debug_job_fault_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debug_job_fault_release,
+};
+
+/*
+ * Initialize debugfs entry for job fault dump
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("job_fault", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_debug_job_fault_fops);
+}
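For reference, a user-space debug daemon would drive this interface by reading the file until EOF. The sketch below is illustrative only and assumes the default debugfs mount point and a device directory named mali0; both are assumptions, not guaranteed by this patch.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/mali0/job_fault", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* First line is "<tgid>_<id>", then "address: value" pairs */
            while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);
            return 0;
    }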
+
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+
+ INIT_LIST_HEAD(&kbdev->job_fault_event_list);
+
+ init_waitqueue_head(&(kbdev->job_fault_wq));
+ init_waitqueue_head(&(kbdev->job_fault_resume_wq));
+ spin_lock_init(&kbdev->job_fault_event_lock);
+
+ kbdev->job_fault_resume_workq = alloc_workqueue(
+ "kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1);
+ if (!kbdev->job_fault_resume_workq)
+ return -ENOMEM;
+
+ kbdev->job_fault_debug = false;
+
+ return 0;
+}
+
+/*
+ * Release the relevant resource per device
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+ destroy_workqueue(kbdev->job_fault_resume_workq);
+}
+
+
+/*
+ * Initialize the relevant data structure per context
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx)
+{
+
+ /* Allocate double the register range, because this memory holds
+ * both the register addresses and their values.
+ */
+ kctx->reg_dump = vmalloc(0x4000 * 2);
+ if (kctx->reg_dump == NULL)
+ return;
+
+ if (kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000) == false) {
+ vfree(kctx->reg_dump);
+ kctx->reg_dump = NULL;
+ }
+ INIT_LIST_HEAD(&kctx->job_fault_resume_event_list);
+ atomic_set(&kctx->job_fault_count, 0);
+
+}
+
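The dump buffer allocated here is laid out as address/value pairs terminated by REGISTER_DUMP_TERMINATION_FLAG, which is how debug_job_fault_show() above walks it. A minimal walker, assuming the buffer really is an array of u32 as used above (illustrative only, not part of this patch):

    static void hypothetical_walk_reg_dump(const u32 *reg_dump)
    {
            int i = 0;

            while (reg_dump[i] != REGISTER_DUMP_TERMINATION_FLAG) {
                    pr_info("%08x: %08x\n", reg_dump[i], reg_dump[i + 1]);
                    i += 2;
            }
    }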
+/*
+ * release the relevant resource per context
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx)
+{
+ vfree(kctx->reg_dump);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+ kbdev->job_fault_debug = false;
+
+ return 0;
+}
+
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.h b/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.h
new file mode 100644
index 000000000000..a2bf8983c37c
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug_job_fault.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DEBUG_JOB_FAULT_H
+#define _KBASE_DEBUG_JOB_FAULT_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF
+
+/**
+ * kbase_debug_job_fault_dev_init - Create the fault event wait queue
+ * per device and initialize the required lists.
+ * @kbdev: Device pointer
+ *
+ * Return: Zero on success or a negative error code.
+ */
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_debugfs_init - Initialize the job fault debugfs entry
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_dev_term - Clean up resources created in
+ * kbase_debug_job_fault_dev_init.
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_context_init - Initialize the relevant
+ * data structure per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_context_term - Release the relevant
+ * resource per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_process - Process the failed job.
+ * It sends an event, wakes up the job fault wait queue and then queues
+ * work that waits for the job dump to finish.
+ * This function should be called from the interrupt handler, before
+ * jd_done, to make sure jd_done_worker is delayed until the job dump
+ * has finished.
+ * @katom: The failed atom pointer
+ * @completion_code: the job status
+ *
+ * Return: true if a dump is in progress
+ */
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+ u32 completion_code);
+
+
+/**
+ * kbase_debug_job_fault_reg_snapshot_init - Set the addresses of the
+ * registers of interest for the job fault process; the relevant
+ * registers will be saved when a job fault happens
+ * @kctx: KBase context pointer
+ * @reg_range: Maximum register address space
+ *
+ * Return: true if initialization succeeded
+ */
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+ int reg_range);
+
+/**
+ * kbase_job_fault_get_reg_snapshot - Read the registers of interest for
+ * the failed job dump
+ * @kctx: KBase context pointer
+ *
+ * Return: true if the registers were read successfully
+ */
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx);
+
+#endif /*_KBASE_DEBUG_JOB_FAULT_H*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.c b/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.c
new file mode 100644
index 000000000000..aa271566e917
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.c
@@ -0,0 +1,306 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Debugfs interface to dump the memory visible to the GPU
+ */
+
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase.h"
+
+#include <linux/list.h>
+#include <linux/file.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+struct debug_mem_mapping {
+ struct list_head node;
+
+ struct kbase_mem_phy_alloc *alloc;
+ unsigned long flags;
+
+ u64 start_pfn;
+ size_t nr_pages;
+};
+
+struct debug_mem_data {
+ struct list_head mapping_list;
+ struct kbase_context *kctx;
+};
+
+struct debug_mem_seq_off {
+ struct list_head *lh;
+ size_t offset;
+};
+
+static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data;
+ struct debug_mem_mapping *map;
+ loff_t pos = *_pos;
+
+ list_for_each_entry(map, &mem_data->mapping_list, node) {
+ if (pos >= map->nr_pages) {
+ pos -= map->nr_pages;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+ data->lh = &map->node;
+ data->offset = pos;
+ return data;
+ }
+ }
+
+ /* Beyond the end */
+ return NULL;
+}
+
+static void debug_mem_stop(struct seq_file *m, void *v)
+{
+ kfree(v);
+}
+
+static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data = v;
+ struct debug_mem_mapping *map;
+
+ map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+ if (data->offset < map->nr_pages - 1) {
+ data->offset++;
+ ++*pos;
+ return data;
+ }
+
+ if (list_is_last(data->lh, &mem_data->mapping_list)) {
+ kfree(data);
+ return NULL;
+ }
+
+ data->lh = data->lh->next;
+ data->offset = 0;
+ ++*pos;
+
+ return data;
+}
+
+static int debug_mem_show(struct seq_file *m, void *v)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data = v;
+ struct debug_mem_mapping *map;
+ int i, j;
+ struct page *page;
+ uint32_t *mapping;
+ pgprot_t prot = PAGE_KERNEL;
+
+ map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+ kbase_gpu_vm_lock(mem_data->kctx);
+
+ if (data->offset >= map->alloc->nents) {
+ seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +
+ data->offset) << PAGE_SHIFT);
+ goto out;
+ }
+
+ if (!(map->flags & KBASE_REG_CPU_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ page = phys_to_page(as_phys_addr_t(map->alloc->pages[data->offset]));
+ mapping = vmap(&page, 1, VM_MAP, prot);
+ if (!mapping)
+ goto out;
+
+ for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) {
+ seq_printf(m, "%016llx:", i + ((map->start_pfn +
+ data->offset) << PAGE_SHIFT));
+
+ for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping))
+ seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]);
+ seq_putc(m, '\n');
+ }
+
+ vunmap(mapping);
+
+ seq_putc(m, '\n');
+
+out:
+ kbase_gpu_vm_unlock(mem_data->kctx);
+ return 0;
+}
+
+static const struct seq_operations ops = {
+ .start = debug_mem_start,
+ .next = debug_mem_next,
+ .stop = debug_mem_stop,
+ .show = debug_mem_show,
+};
+
+static int debug_mem_zone_open(struct rb_root *rbtree,
+ struct debug_mem_data *mem_data)
+{
+ int ret = 0;
+ struct rb_node *p;
+ struct kbase_va_region *reg;
+ struct debug_mem_mapping *mapping;
+
+ for (p = rb_first(rbtree); p; p = rb_next(p)) {
+ reg = rb_entry(p, struct kbase_va_region, rblink);
+
+ if (reg->gpu_alloc == NULL)
+ /* Empty region - ignore */
+ continue;
+
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ mapping->start_pfn = reg->start_pfn;
+ mapping->nr_pages = reg->nr_pages;
+ mapping->flags = reg->flags;
+ list_add_tail(&mapping->node, &mem_data->mapping_list);
+ }
+
+out:
+ return ret;
+}
+
+static int debug_mem_open(struct inode *i, struct file *file)
+{
+ struct file *kctx_file = i->i_private;
+ struct kbase_context *kctx = kctx_file->private_data;
+ struct debug_mem_data *mem_data;
+ int ret;
+
+ ret = seq_open(file, &ops);
+ if (ret)
+ return ret;
+
+ mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
+ if (!mem_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mem_data->kctx = kctx;
+
+ INIT_LIST_HEAD(&mem_data->mapping_list);
+
+ get_file(kctx_file);
+
+ kbase_gpu_vm_lock(kctx);
+
+ ret = debug_mem_zone_open(&kctx->reg_rbtree_same, mem_data);
+ if (0 != ret) {
+ kbase_gpu_vm_unlock(kctx);
+ goto out;
+ }
+
+ ret = debug_mem_zone_open(&kctx->reg_rbtree_exec, mem_data);
+ if (0 != ret) {
+ kbase_gpu_vm_unlock(kctx);
+ goto out;
+ }
+
+ ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data);
+ if (0 != ret) {
+ kbase_gpu_vm_unlock(kctx);
+ goto out;
+ }
+
+ kbase_gpu_vm_unlock(kctx);
+
+ ((struct seq_file *)file->private_data)->private = mem_data;
+
+ return 0;
+
+out:
+ if (mem_data) {
+ while (!list_empty(&mem_data->mapping_list)) {
+ struct debug_mem_mapping *mapping;
+
+ mapping = list_first_entry(&mem_data->mapping_list,
+ struct debug_mem_mapping, node);
+ kbase_mem_phy_alloc_put(mapping->alloc);
+ list_del(&mapping->node);
+ kfree(mapping);
+ }
+ fput(kctx_file);
+ kfree(mem_data);
+ }
+ seq_release(i, file);
+ return ret;
+}
+
+static int debug_mem_release(struct inode *inode, struct file *file)
+{
+ struct file *kctx_file = inode->i_private;
+ struct seq_file *sfile = file->private_data;
+ struct debug_mem_data *mem_data = sfile->private;
+ struct debug_mem_mapping *mapping;
+
+ seq_release(inode, file);
+
+ while (!list_empty(&mem_data->mapping_list)) {
+ mapping = list_first_entry(&mem_data->mapping_list,
+ struct debug_mem_mapping, node);
+ kbase_mem_phy_alloc_put(mapping->alloc);
+ list_del(&mapping->node);
+ kfree(mapping);
+ }
+
+ kfree(mem_data);
+
+ fput(kctx_file);
+
+ return 0;
+}
+
+static const struct file_operations kbase_debug_mem_view_fops = {
+ .open = debug_mem_open,
+ .release = debug_mem_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+/**
+ * kbase_debug_mem_view_init - Initialise the mem_view debugfs file
+ * @kctx_file: The /dev/mali0 file instance for the context
+ *
+ * This function creates a "mem_view" file which can be used to get a view of
+ * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
+ *
+ * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
+ * parent directory.
+ */
+void kbase_debug_mem_view_init(struct file *kctx_file)
+{
+ struct kbase_context *kctx = kctx_file->private_data;
+
+ debugfs_create_file("mem_view", S_IRUGO, kctx->kctx_dentry, kctx_file,
+ &kbase_debug_mem_view_fops);
+}
+
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.h b/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.h
new file mode 100644
index 000000000000..20ab51a776c6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_debug_mem_view.h
@@ -0,0 +1,25 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DEBUG_MEM_VIEW_H
+#define _KBASE_DEBUG_MEM_VIEW_H
+
+#include <mali_kbase.h>
+
+void kbase_debug_mem_view_init(struct file *kctx_file);
+
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_defs.h b/drivers/gpu/arm_gpu/mali_kbase_defs.h
new file mode 100644
index 000000000000..fae88f609266
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_defs.h
@@ -0,0 +1,1625 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_defs.h
+ *
+ * Definitions (types, defines, etc.) common to Kbase. They are placed here
+ * to allow the hierarchy of header files to work.
+ */
+
+#ifndef _KBASE_DEFS_H_
+#define _KBASE_DEFS_H_
+
+#include <mali_kbase_config.h>
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_kbase_mem_lowlevel.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_mmu_mode.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_pm.h>
+#include <protected_mode_switcher.h>
+
+#include <linux/atomic.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#include <linux/bus_logger.h>
+#endif
+
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif /* CONFIG_KDS */
+
+#if defined(CONFIG_SYNC)
+#include <sync.h>
+#else
+#include "mali_kbase_fence_defs.h"
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_PM_RUNTIME) || \
+ (defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+#define KBASE_PM_RUNTIME 1
+#endif
+
+/** Enable SW tracing when set */
+#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
+#define KBASE_TRACE_ENABLE 1
+#endif
+
+#ifndef KBASE_TRACE_ENABLE
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_TRACE_ENABLE 1
+#else
+#define KBASE_TRACE_ENABLE 0
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* KBASE_TRACE_ENABLE */
+
+/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
+#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
+
+/**
+ * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
+ * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU
+ * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware
+ * before resetting.
+ */
+#define ZAP_TIMEOUT 1000
+
+/** Number of milliseconds before we time out on a GPU soft/hard reset */
+#define RESET_TIMEOUT 500
+
+/**
+ * Prevent soft-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but when scheduling is desired to be more predictable.
+ *
+ * Note that soft stop may still be disabled due to HW issues.
+ *
+ * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
+
+/**
+ * Prevent hard-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but when scheduling is desired to be more predictable.
+ *
+ * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
+
+/**
+ * The maximum number of Job Slots to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of job slots.
+ */
+#define BASE_JM_MAX_NR_SLOTS 3
+
+/**
+ * The maximum number of Address Spaces to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of Address Spaces
+ */
+#define BASE_MAX_NR_AS 16
+
+/* mmu */
+#define MIDGARD_MMU_VA_BITS 48
+
+#define MIDGARD_MMU_LEVEL(x) (x)
+
+#if MIDGARD_MMU_VA_BITS > 39
+#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(0)
+#else
+#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(1)
+#endif
+
+#define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3)
+
+#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR)
+
+/** setting in kbase_context::as_nr that indicates it's invalid */
+#define KBASEP_AS_NR_INVALID (-1)
+
+#define KBASE_LOCK_REGION_MAX_SIZE (63)
+#define KBASE_LOCK_REGION_MIN_SIZE (11)
+
+#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
+#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
+#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_hwaccess_defs.h"
+
+#define KBASEP_FORCE_REPLAY_DISABLED 0
+
+/* Maximum force replay limit when randomization is enabled */
+#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
+
+/** Atom has been previously soft-stopped */
+#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
+/** Atom execution has previously been retried */
+#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
+/** Atom has been previously hard-stopped. */
+#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
+/** Atom has caused us to enter disjoint state */
+#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
+/* Atom blocked on cross-slot dependency */
+#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7)
+/* Atom has fail dependency on cross-slot dependency */
+#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8)
+/* Atom is currently in the list of atoms blocked on cross-slot dependencies */
+#define KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST (1<<9)
+/* Atom is currently holding a context reference */
+#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10)
+/* Atom requires GPU to be in protected mode */
+#define KBASE_KATOM_FLAG_PROTECTED (1<<11)
+/* Atom has been stored in runnable_tree */
+#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12)
+
+/* SW related flags about types of JS_COMMAND action
+ * NOTE: These must be masked off by JS_COMMAND_MASK */
+
+/** This command causes a disjoint event */
+#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
+
+/** Bitmask of all SW related flags */
+#define JS_COMMAND_SW_BITS (JS_COMMAND_SW_CAUSES_DISJOINT)
+
+#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
+#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
+#endif
+
+/** Soft-stop command that causes a Disjoint event. This of course isn't
+ * entirely masked off by JS_COMMAND_MASK */
+#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
+ (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
+
+#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT
+
+/* Serialize atoms within a slot (ie only one atom per job slot) */
+#define KBASE_SERIALIZE_INTRA_SLOT (1 << 0)
+/* Serialize atoms between slots (ie only one job slot running at any time) */
+#define KBASE_SERIALIZE_INTER_SLOT (1 << 1)
+/* Reset the GPU after each atom completion */
+#define KBASE_SERIALIZE_RESET (1 << 2)
+
+#ifdef CONFIG_DEBUG_FS
+struct base_job_fault_event {
+
+ u32 event_code;
+ struct kbase_jd_atom *katom;
+ struct work_struct job_fault_work;
+ struct list_head head;
+ int reg_offset;
+};
+
+#endif
+
+struct kbase_jd_atom_dependency {
+ struct kbase_jd_atom *atom;
+ u8 dep_type;
+};
+
+/**
+ * struct kbase_io_access - holds information about 1 register access
+ *
+ * @addr: first bit indicates r/w (r=0, w=1)
+ * @value: value written or read
+ */
+struct kbase_io_access {
+ uintptr_t addr;
+ u32 value;
+};
+
+/**
+ * struct kbase_io_history - keeps track of all recent register accesses
+ *
+ * @enabled: true if register accesses are recorded, false otherwise
+ * @lock: spinlock protecting kbase_io_access array
+ * @count: number of registers read/written
+ * @size: number of elements in kbase_io_access array
+ * @buf: array of kbase_io_access
+ */
+struct kbase_io_history {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ bool enabled;
+#else
+ u32 enabled;
+#endif
+
+ spinlock_t lock;
+ size_t count;
+ u16 size;
+ struct kbase_io_access *buf;
+};
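Given the field description above (bit 0 of @addr encodes the access direction), one recorded access could be decoded as follows; this is an illustrative sketch only and the helper name is hypothetical:

    static void hypothetical_print_io_access(const struct kbase_io_access *acc)
    {
            bool is_write = acc->addr & 1;
            uintptr_t reg_addr = acc->addr & ~(uintptr_t)1;

            pr_info("%c 0x%lx = 0x%08x\n", is_write ? 'w' : 'r',
                    (unsigned long)reg_addr, acc->value);
    }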
+
+/**
+ * @brief The function retrieves a read-only reference to the atom field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return readonly reference to dependent ATOM.
+ */
+static inline const struct kbase_jd_atom * kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return (const struct kbase_jd_atom *)(dep->atom);
+}
+
+/**
+ * @brief The function retrieves a read-only reference to the dependency type field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return A dependency type value.
+ */
+static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return dep->dep_type;
+}
+
+/**
+ * @brief Setter for a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] dep The kbase jd atom dependency.
+ * @param[in] a The ATOM to be set as a dependency.
+ * @param type The ATOM dependency type to be set.
+ *
+ */
+static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep,
+ struct kbase_jd_atom *a, u8 type)
+{
+ struct kbase_jd_atom_dependency *dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+ dep->atom = a;
+ dep->dep_type = type;
+}
+
+/**
+ * @brief Clear a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] dep The kbase jd atom dependency to be cleared.
+ *
+ */
+static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep)
+{
+ struct kbase_jd_atom_dependency *dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+ dep->atom = NULL;
+ dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
+}
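A small sketch of how the accessors above fit together (illustrative only, not part of this patch; the helper is hypothetical): a dependency slot is considered populated when it holds an atom and its type is not BASE_JD_DEP_TYPE_INVALID, which is exactly the state kbase_jd_katom_dep_clear() resets it to.

    static inline bool hypothetical_dep_is_set(
                    const struct kbase_jd_atom_dependency *dep)
    {
            return kbase_jd_katom_dep_atom(dep) != NULL &&
                   kbase_jd_katom_dep_type(dep) != BASE_JD_DEP_TYPE_INVALID;
    }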
+
+enum kbase_atom_gpu_rb_state {
+ /* Atom is not currently present in slot ringbuffer */
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
+ /* Atom is in slot ringbuffer but is blocked on a previous atom */
+ KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
+ /* Atom is in slot ringbuffer but is waiting for a previous protected
+ * mode transition to complete */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
+ /* Atom is in slot ringbuffer but is waiting for a protected mode
+ * transition */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
+ /* Atom is in slot ringbuffer but is waiting for cores to become
+ * available */
+ KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
+ /* Atom is in slot ringbuffer but is blocked on affinity */
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
+ /* Atom is in slot ringbuffer and ready to run */
+ KBASE_ATOM_GPU_RB_READY,
+ /* Atom is in slot ringbuffer and has been submitted to the GPU */
+ KBASE_ATOM_GPU_RB_SUBMITTED,
+ /* Atom must be returned to JS as soon as it reaches the head of the
+ * ringbuffer due to a previous failure */
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1
+};
+
+enum kbase_atom_enter_protected_state {
+ /*
+ * Starting state:
+ * Check if a transition into protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+ */
+ KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
+ /* Wait for vinstr to suspend. */
+ KBASE_ATOM_ENTER_PROTECTED_VINSTR,
+ /* Wait for the L2 to become idle in preparation for
+ * the coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
+ /* End state;
+ * Prepare coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED,
+};
+
+enum kbase_atom_exit_protected_state {
+ /*
+ * Starting state:
+ * Check if a transition out of protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_ENTER_PROTECTED_CHECK.
+ */
+ KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
+ /* Wait for the L2 to become idle in preparation
+ * for the reset. */
+ KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
+ /* Issue the protected reset. */
+ KBASE_ATOM_EXIT_PROTECTED_RESET,
+ /* End state;
+ * Wait for the reset to complete. */
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
+};
+
+struct kbase_ext_res {
+ u64 gpu_address;
+ struct kbase_mem_phy_alloc *alloc;
+};
+
+struct kbase_jd_atom {
+ struct work_struct work;
+ ktime_t start_timestamp;
+
+ struct base_jd_udata udata;
+ struct kbase_context *kctx;
+
+ struct list_head dep_head[2];
+ struct list_head dep_item[2];
+ const struct kbase_jd_atom_dependency dep[2];
+ /* List head used during job dispatch job_done processing - as
+ * dependencies may not be entirely resolved at this point, we need to
+ * use a separate list head. */
+ struct list_head jd_item;
+ /* true if atom's jd_item is currently on a list. Prevents atom being
+ * processed twice. */
+ bool in_jd_list;
+
+ u16 nr_extres;
+ struct kbase_ext_res *extres;
+
+ u32 device_nr;
+ u64 affinity;
+ u64 jc;
+ enum kbase_atom_coreref_state coreref_state;
+#ifdef CONFIG_KDS
+ struct list_head node;
+ struct kds_resource_set *kds_rset;
+ bool kds_dep_satisfied;
+#endif /* CONFIG_KDS */
+#if defined(CONFIG_SYNC)
+ /* Stores either an input or output fence, depending on soft-job type */
+ struct sync_fence *fence;
+ struct sync_fence_waiter sync_waiter;
+#endif /* CONFIG_SYNC */
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+ struct {
+ /* Use the functions/API defined in mali_kbase_fence.h
+ * when working with this sub struct */
+#if defined(CONFIG_SYNC_FILE)
+ /* Input fence */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence_in;
+#else
+ struct dma_fence *fence_in;
+#endif
+#endif
+ /* This points to the dma-buf output fence for this atom. If
+ * this is NULL then there is no fence for this atom and the
+ * following fields related to dma_fence may have invalid data.
+ *
+ * The context and seqno fields contain the details for this
+ * fence.
+ *
+ * This fence is signaled when the katom is completed,
+ * regardless of the event_code of the katom (signal also on
+ * failure).
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+ /* The dma-buf fence context number for this atom. A unique
+ * context number is allocated to each katom in the context on
+ * context creation.
+ */
+ unsigned int context;
+ /* The dma-buf fence sequence number for this atom. This is
+ * increased every time this katom uses dma-buf fence.
+ */
+ atomic_t seqno;
+ /* This contains a list of all callbacks set up to wait on
+ * other fences. This atom must be held back from JS until all
+ * these callbacks have been called and dep_count have reached
+ * 0. The initial value of dep_count must be equal to the
+ * number of callbacks on this list.
+ *
+ * This list is protected by jctx.lock. Callbacks are added to
+ * this list when the atom is built and the wait are set up.
+ * All the callbacks then stay on the list until all callbacks
+ * have been called and the atom is queued, or cancelled, and
+ * then all callbacks are taken off the list and freed.
+ */
+ struct list_head callbacks;
+ /* Atomic counter of number of outstanding dma-buf fence
+ * dependencies for this atom. When dep_count reaches 0 the
+ * atom may be queued.
+ *
+ * The special value "-1" may only be set after the count
+ * reaches 0, while holding jctx.lock. This indicates that the
+ * atom has been handled, either queued in JS or cancelled.
+ *
+ * If anyone but the dma-fence worker sets this to -1 they must
+ * ensure that any potentially queued worker must have
+ * completed before allowing the atom to be marked as unused.
+ * This can be done by flushing the fence work queue:
+ * kctx->dma_fence.wq.
+ */
+ atomic_t dep_count;
+ } dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE*/
+
+ /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
+ enum base_jd_event_code event_code;
+ base_jd_core_req core_req; /**< core requirements */
+ /** Job Slot to retry submitting to if submission from IRQ handler failed
+ *
+ * NOTE: see if this can be unified into the another member e.g. the event */
+ int retry_submit_on_slot;
+
+ u32 ticks;
+ /* JS atom priority with respect to other atoms on its kctx. */
+ int sched_priority;
+
+ int poking; /* BASE_HW_ISSUE_8316 */
+
+ wait_queue_head_t completed;
+ enum kbase_jd_atom_state status;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ int work_id;
+#endif
+ /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */
+ int slot_nr;
+
+ u32 atom_flags;
+
+ /* Number of times this atom has been retried. Used by replay soft job.
+ */
+ int retry_count;
+
+ enum kbase_atom_gpu_rb_state gpu_rb_state;
+
+ u64 need_cache_flush_cores_retained;
+
+ atomic_t blocked;
+
+ /* Pointer to atom that this atom has same-slot dependency on */
+ struct kbase_jd_atom *pre_dep;
+ /* Pointer to atom that has same-slot dependency on this atom */
+ struct kbase_jd_atom *post_dep;
+
+ /* Pointer to atom that this atom has cross-slot dependency on */
+ struct kbase_jd_atom *x_pre_dep;
+ /* Pointer to atom that has cross-slot dependency on this atom */
+ struct kbase_jd_atom *x_post_dep;
+
+ /* The GPU's flush count recorded at the time of submission, used for
+ * the cache flush optimisation */
+ u32 flush_id;
+
+ struct kbase_jd_atom_backend backend;
+#ifdef CONFIG_DEBUG_FS
+ struct base_job_fault_event fault_event;
+#endif
+
+ /* List head used for three different purposes:
+ * 1. Overflow list for JS ring buffers. If an atom is ready to run,
+ * but there is no room in the JS ring buffer, then the atom is put
+ * on the ring buffer's overflow list using this list node.
+ * 2. List of waiting soft jobs.
+ */
+ struct list_head queue;
+
+ /* Used to keep track of all JIT free/alloc jobs in submission order
+ */
+ struct list_head jit_node;
+ bool jit_blocked;
+
+ /* If non-zero, this indicates that the atom will fail with the set
+ * event_code when the atom is processed. */
+ enum base_jd_event_code will_fail_event_code;
+
+ /* Atoms will only ever be transitioning into, or out of
+ * protected mode so we do not need two separate fields.
+ */
+ union {
+ enum kbase_atom_enter_protected_state enter;
+ enum kbase_atom_exit_protected_state exit;
+ } protected_state;
+
+ struct rb_node runnable_tree_node;
+
+ /* 'Age' of atom relative to other atoms in the context. */
+ u32 age;
+};
+
+static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom)
+{
+ return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED);
+}
+
+/*
+ * Theory of operations:
+ *
+ * Atom objects are statically allocated within the context structure.
+ *
+ * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
+ */
+
+#define KBASE_JD_DEP_QUEUE_SIZE 256
+
+struct kbase_jd_context {
+ struct mutex lock;
+ struct kbasep_js_kctx_info sched_info;
+ struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+
+ /** Tracks all job-dispatch jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ u32 job_nr;
+
+ /** Waitq that reflects whether there are no jobs (including SW-only
+ * dependency jobs). This is set when no jobs are present on the ctx,
+ * and clear when there are jobs.
+ *
+ * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
+ * the Job Scheduler is unaware of jobs that are blocked on dependencies,
+ * and SW-only dependency jobs.
+ *
+ * This waitq can be waited upon to find out when the context jobs are all
+ * done/cancelled (including those that might've been blocked on
+ * dependencies) - and so, whether it can be terminated. However, it should
+ * only be terminated once it is not present in the run-pool (see
+ * kbasep_js_kctx_info::ctx::is_scheduled).
+ *
+ * Since the waitq is only set under kbase_jd_context::lock,
+ * the waiter should also briefly obtain and drop kbase_jd_context::lock to
+ * guarantee that the setter has completed its work on the kbase_context
+ *
+ * This must be updated atomically with:
+ * - kbase_jd_context::job_nr */
+ wait_queue_head_t zero_jobs_wait;
+
+ /** Job Done workqueue. */
+ struct workqueue_struct *job_done_wq;
+
+ spinlock_t tb_lock;
+ u32 *tb;
+ size_t tb_wrap_offset;
+
+#ifdef CONFIG_KDS
+ struct kds_callback kds_cb;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_t work_id;
+#endif
+};
+
+struct kbase_device_info {
+ u32 features;
+};
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+enum {
+ KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
+ KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
+};
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+typedef u32 kbase_as_poke_state;
+
+struct kbase_mmu_setup {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+};
+
+/**
+ * Important: Our code makes assumptions that a struct kbase_as structure is always at
+ * kbase_device->as[number]. This is used to recover the containing
+ * struct kbase_device from a struct kbase_as structure.
+ *
+ * Therefore, struct kbase_as structures must not be allocated anywhere else.
+ */
+struct kbase_as {
+ int number;
+
+ struct workqueue_struct *pf_wq;
+ struct work_struct work_pagefault;
+ struct work_struct work_busfault;
+ enum kbase_mmu_fault_type fault_type;
+ bool protected_mode;
+ u32 fault_status;
+ u64 fault_addr;
+ u64 fault_extra_addr;
+
+ struct kbase_mmu_setup current_setup;
+
+ /* BASE_HW_ISSUE_8316 */
+ struct workqueue_struct *poke_wq;
+ struct work_struct poke_work;
+ /** Protected by hwaccess_lock */
+ int poke_refcount;
+ /** Protected by hwaccess_lock */
+ kbase_as_poke_state poke_state;
+ struct hrtimer poke_timer;
+};
+
+static inline int kbase_as_has_bus_fault(struct kbase_as *as)
+{
+ return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
+}
+
+static inline int kbase_as_has_page_fault(struct kbase_as *as)
+{
+ return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
+}
+
+struct kbasep_mem_device {
+ atomic_t used_pages; /* Tracks usage of OS shared memory. Updated
+ when OS memory is allocated/freed. */
+
+};
+
+#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
+
+enum kbase_trace_code {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ENUM */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+ /* Comma on its own, to extend the list */
+ ,
+ /* Must be the last in the enum */
+ KBASE_TRACE_CODE_COUNT
+};
+
+#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
+#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
+
+struct kbase_trace {
+ struct timespec timestamp;
+ u32 thread_id;
+ u32 cpu;
+ void *ctx;
+ bool katom;
+ int atom_number;
+ u64 atom_udata[2];
+ u64 gpu_addr;
+ unsigned long info_val;
+ u8 code;
+ u8 jobslot;
+ u8 refcount;
+ u8 flags;
+};
+
+/** Event IDs for the power management framework.
+ *
+ * Any of these events might be missed, so they should not be relied upon to
+ * find the precise state of the GPU at a particular time in the
+ * trace. Overall, we should get a high percentage of these events for
+ * statistical purposes, and so a few missing should not be a problem */
+enum kbase_timeline_pm_event {
+ /* helper for tests */
+ KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** Event reserved for backwards compatibility with 'init' events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** The power state of the device has changed.
+ *
+ * Specifically, the device has reached a desired or available state.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
+
+ /** The GPU is becoming active.
+ *
+ * This event is sent when the first context is about to use the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
+
+ /** The GPU is becoming idle.
+ *
+ * This event is sent when the last context has finished using the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
+
+ /** Event reserved for backwards compatibility with 'policy_change'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_4,
+
+ /** Event reserved for backwards compatibility with 'system_suspend'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_5,
+
+ /** Event reserved for backwards compatibility with 'system_resume'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_6,
+
+ /** The job scheduler is requesting to power up/down cores.
+ *
+ * This event is sent when:
+ * - powered down cores are needed to complete a job
+ * - powered up cores are not needed anymore
+ */
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+
+ KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+};
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+struct kbase_trace_kctx_timeline {
+ atomic_t jd_atoms_in_flight;
+ u32 owner_tgid;
+};
+
+struct kbase_trace_kbdev_timeline {
+ /* Note: strictly speaking, not needed, because it's in sync with
+ * kbase_device::jm_slots[]::submitted_nr
+ *
+ * But it's kept as an example of how to add global timeline tracking
+ * information
+ *
+ * The caller must hold hwaccess_lock when accessing this */
+ u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS];
+
+ /* Last UID for each PM event */
+ atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
+ /* Counter for generating PM event UIDs */
+ atomic_t pm_event_uid_counter;
+ /*
+ * L2 transition state - true indicates that the transition is ongoing
+ * Expected to be protected by hwaccess_lock */
+ bool l2_transitioning;
+};
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+
+struct kbasep_kctx_list_element {
+ struct list_head link;
+ struct kbase_context *kctx;
+};
+
+/**
+ * Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ */
+struct kbase_pm_device_data {
+ /**
+ * The lock protecting Power Management structures accessed outside of
+ * IRQ.
+ *
+ * This lock must also be held whenever the GPU is being powered on or
+ * off.
+ */
+ struct mutex lock;
+
+ /** The reference count of active contexts on this device. */
+ int active_count;
+ /** Flag indicating suspending/suspended */
+ bool suspending;
+ /* Wait queue set when active_count == 0 */
+ wait_queue_head_t zero_active_count_wait;
+
+ /**
+ * Bit masks identifying the available shader cores that are specified
+ * via sysfs. One mask per job slot.
+ */
+ u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS];
+ u64 debug_core_mask_all;
+
+ /**
+ * Callback for initializing the runtime power management.
+ *
+ * @param kbdev The kbase device
+ *
+ * @return 0 on success, else error code
+ */
+ int (*callback_power_runtime_init)(struct kbase_device *kbdev);
+
+ /**
+ * Callback for terminating the runtime power management.
+ *
+ * @param kbdev The kbase device
+ */
+ void (*callback_power_runtime_term)(struct kbase_device *kbdev);
+
+ /* Time in milliseconds between each dvfs sample */
+ u32 dvfs_period;
+
+ /* Period of GPU poweroff timer */
+ ktime_t gpu_poweroff_time;
+
+ /* Number of ticks of GPU poweroff timer before shader is powered off */
+ int poweroff_shader_ticks;
+
+ /* Number of ticks of GPU poweroff timer before GPU is powered off */
+ int poweroff_gpu_ticks;
+
+ struct kbase_pm_backend_data backend;
+};
+
+/**
+ * struct kbase_mem_pool - Page based memory pool for kctx/kbdev
+ * @kbdev: Kbase device where memory is used
+ * @cur_size: Number of free pages currently in the pool (may exceed @max_size
+ * in some corner cases)
+ * @max_size: Maximum number of free pages in the pool
+ * @order: order = 0 refers to a pool of 4 KB pages
+ * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
+ * @pool_lock: Lock protecting the pool - must be held when modifying @cur_size
+ * and @page_list
+ * @page_list: List of free pages in the pool
+ * @reclaim: Shrinker for kernel reclaim of free pages
+ * @next_pool: Pointer to next pool where pages can be allocated when this pool
+ * is empty. Pages will spill over to the next pool when this pool
+ * is full. Can be NULL if there is no next pool.
+ */
+struct kbase_mem_pool {
+ struct kbase_device *kbdev;
+ size_t cur_size;
+ size_t max_size;
+ size_t order;
+ spinlock_t pool_lock;
+ struct list_head page_list;
+ struct shrinker reclaim;
+
+ struct kbase_mem_pool *next_pool;
+};
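Following the description of @order above (order 0 is a pool of 4 KB pages, order 9 a pool of 2 MB pages), the byte size of one pool page can be derived as below; this is an illustrative sketch only and the helper is hypothetical:

    static inline size_t hypothetical_mem_pool_page_size(
                    const struct kbase_mem_pool *pool)
    {
            return (size_t)PAGE_SIZE << pool->order;
    }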
+
+/**
+ * struct kbase_devfreq_opp - Lookup table for converting between nominal OPP
+ * frequency, and real frequency and core mask
+ * @opp_freq: Nominal OPP frequency
+ * @real_freq: Real GPU frequency
+ * @core_mask: Shader core mask
+ */
+struct kbase_devfreq_opp {
+ u64 opp_freq;
+ u64 real_freq;
+ u64 core_mask;
+};
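The table above exists to translate a nominal OPP frequency into the real frequency and core mask; a linear lookup along those lines might look like this (illustrative only, not part of this patch; the helper and its parameters are hypothetical):

    static const struct kbase_devfreq_opp *hypothetical_find_opp(
                    const struct kbase_devfreq_opp *table, int num_opps,
                    u64 nominal_freq)
    {
            int i;

            for (i = 0; i < num_opps; i++)
                    if (table[i].opp_freq == nominal_freq)
                            return &table[i];

            return NULL;
    }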
+
+#define DEVNAME_SIZE 16
+
+struct kbase_device {
+ s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
+
+ u32 hw_quirks_sc;
+ u32 hw_quirks_tiler;
+ u32 hw_quirks_mmu;
+ u32 hw_quirks_jm;
+
+ struct list_head entry;
+ struct device *dev;
+ struct miscdevice mdev;
+ u64 reg_start;
+ size_t reg_size;
+ void __iomem *reg;
+
+ void __iomem *crgreg;
+ void __iomem *pmctrlreg;
+ void __iomem *pctrlreg;
+
+ struct {
+ int irq;
+ int flags;
+ } irqs[3];
+
+ struct clk *clock;
+#ifdef CONFIG_REGULATOR
+ struct regulator *regulator;
+#endif
+ char devname[DEVNAME_SIZE];
+
+#ifdef CONFIG_MALI_NO_MALI
+ void *model;
+ struct kmem_cache *irq_slab;
+ struct workqueue_struct *irq_workq;
+ atomic_t serving_job_irq;
+ atomic_t serving_gpu_irq;
+ atomic_t serving_mmu_irq;
+ spinlock_t reg_op_lock;
+#endif /* CONFIG_MALI_NO_MALI */
+
+ struct kbase_pm_device_data pm;
+ struct kbasep_js_device_data js_data;
+ struct kbase_mem_pool mem_pool;
+ struct kbase_mem_pool lp_mem_pool;
+ struct kbasep_mem_device memdev;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ struct kbase_as as[BASE_MAX_NR_AS];
+ /* The below variables (as_free and as_to_kctx) are managed by the
+ * Context Scheduler. The kbasep_js_device_data::runpool_irq::lock must
+ * be held whilst accessing these.
+ */
+ u16 as_free; /* Bitpattern of free Address Spaces */
+ /* Mapping from active Address Spaces to kbase_context */
+ struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
+
+
+ spinlock_t mmu_mask_change;
+
+ struct kbase_gpu_props gpu_props;
+
+ /** List of SW workarounds for HW issues */
+ unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+ /** List of features available */
+ unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+
+ /* Bitmaps of cores that are currently in use (running jobs).
+ * These should be kept up to date by the job scheduler.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ *
+ * kbase_pm_check_transitions_nolock() should be called when bits are
+ * cleared to update the power management system and allow transitions to
+ * occur. */
+ u64 shader_inuse_bitmap;
+
+ /* Refcount for cores in use */
+ u32 shader_inuse_cnt[64];
+
+ /* Bitmaps of cores the JS needs for jobs ready to run */
+ u64 shader_needed_bitmap;
+
+ /* Refcount for cores needed */
+ u32 shader_needed_cnt[64];
+
+ u32 tiler_inuse_cnt;
+
+ u32 tiler_needed_cnt;
+
+ /* struct for keeping track of the disjoint information
+ *
+ * The state is > 0 if the GPU is in a disjoint state. Otherwise 0
+ * The count is the number of disjoint events that have occurred on the GPU
+ */
+ struct {
+ atomic_t count;
+ atomic_t state;
+ } disjoint_event;
+
+ /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
+ u32 l2_users_count;
+
+ /* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be
+ * submitted to these cores). These are updated by the power management code. The job scheduler should avoid
+ * submitting new jobs to any cores that are not marked as available.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ */
+ u64 shader_available_bitmap;
+ u64 tiler_available_bitmap;
+ u64 l2_available_bitmap;
+ u64 stack_available_bitmap;
+
+ u64 shader_ready_bitmap;
+ u64 shader_transitioning_bitmap;
+
+ s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
+ s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
+
+ /* Structure used for instrumentation and HW counters dumping */
+ struct kbase_hwcnt {
+ /* The lock should be used when accessing any of the following members */
+ spinlock_t lock;
+
+ struct kbase_context *kctx;
+ u64 addr;
+
+ struct kbase_instr_backend backend;
+ } hwcnt;
+
+ struct kbase_vinstr_context *vinstr_ctx;
+
+#if KBASE_TRACE_ENABLE
+ spinlock_t trace_lock;
+ u16 trace_first_out;
+ u16 trace_next_in;
+ struct kbase_trace *trace_rbuf;
+#endif
+
+ u32 reset_timeout_ms;
+
+ struct mutex cacheclean_lock;
+
+ /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
+ void *platform_context;
+
+ /* List of kbase_contexts created */
+ struct list_head kctx_list;
+ struct mutex kctx_list_lock;
+
+#ifdef CONFIG_MALI_DEVFREQ
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq *devfreq;
+ unsigned long current_freq;
+ unsigned long current_nominal_freq;
+ unsigned long current_voltage;
+ u64 current_core_mask;
+ struct kbase_devfreq_opp *opp_table;
+ int num_opps;
+#ifdef CONFIG_DEVFREQ_THERMAL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+ struct devfreq_cooling_device *devfreq_cooling;
+#else
+ struct thermal_cooling_device *devfreq_cooling;
+#endif
+ /* Current IPA model - true for configured model, false for fallback */
+ atomic_t ipa_use_configured_model;
+ struct {
+ /* Access to this struct must be with ipa.lock held */
+ struct mutex lock;
+ struct kbase_ipa_model *configured_model;
+ struct kbase_ipa_model *fallback_model;
+ } ipa;
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ struct kbase_trace_kbdev_timeline timeline;
+#endif
+
+ /*
+ * Control for enabling job dump on failure, set when control debugfs
+ * is opened.
+ */
+ bool job_fault_debug;
+
+#ifdef CONFIG_DEBUG_FS
+ /* directory for debugfs entries */
+ struct dentry *mali_debugfs_directory;
+ /* Root directory for per context entry */
+ struct dentry *debugfs_ctx_directory;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* bit for each as, set if there is new data to report */
+ u64 debugfs_as_read_bitmap;
+#endif /* CONFIG_MALI_DEBUG */
+
+ /* failed job dump, used for separate debug process */
+ wait_queue_head_t job_fault_wq;
+ wait_queue_head_t job_fault_resume_wq;
+ struct workqueue_struct *job_fault_resume_workq;
+ struct list_head job_fault_event_list;
+ spinlock_t job_fault_event_lock;
+ struct kbase_context *kctx_fault;
+
+#if !MALI_CUSTOMER_RELEASE
+ /* Per-device data for register dumping interface */
+ struct {
+ u16 reg_offset; /* Offset of a GPU_CONTROL register to be
+ dumped upon request */
+ } regs_dump_debugfs_data;
+#endif /* !MALI_CUSTOMER_RELEASE */
+#endif /* CONFIG_DEBUG_FS */
+
+ /* fbdump profiling controls set by gator */
+ u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
+
+
+#if MALI_CUSTOMER_RELEASE == 0
+ /* Number of jobs that are run before a job is forced to fail and
+ * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced
+ * failures. */
+ int force_replay_limit;
+ /* Count of jobs between forced failures. Incremented on each job. A
+ * job is forced to fail once this is greater than or equal to
+ * force_replay_limit. */
+ int force_replay_count;
+ /* Core requirement for jobs to be failed and replayed. May be zero. */
+ base_jd_core_req force_replay_core_req;
+ /* true if force_replay_limit should be randomized. The random
+ * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
+ */
+ bool force_replay_random;
+#endif
+
+ /* Total number of created contexts */
+ atomic_t ctx_num;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Holds the most recent register accesses */
+ struct kbase_io_history io_history;
+#endif /* CONFIG_DEBUG_FS */
+
+ struct kbase_hwaccess_data hwaccess;
+
+ /* Count of page/bus faults waiting for workqueues to process */
+ atomic_t faults_pending;
+
+ /* true if GPU is powered off or power off operation is in progress */
+ bool poweroff_pending;
+
+
+ /* defaults for new context created for this device */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ bool infinite_cache_active_default;
+#else
+ u32 infinite_cache_active_default;
+#endif
+ size_t mem_pool_max_size_default;
+
+ /* current gpu coherency mode */
+ u32 current_gpu_coherency_mode;
+ /* system coherency mode */
+ u32 system_coherency;
+ /* Flag to track when cci snoops have been enabled on the interface */
+ bool cci_snoop_enabled;
+
+ /* SMC function IDs to call into Trusted firmware to enable/disable
+ * cache snooping. Value of 0 indicates that they are not used
+ */
+ u32 snoop_enable_smc;
+ u32 snoop_disable_smc;
+
+ /* Protected mode operations */
+ struct protected_mode_ops *protected_ops;
+
+ /* Protected device attached to this kbase device */
+ struct protected_mode_device *protected_dev;
+
+ /*
+ * true when GPU is put into protected mode
+ */
+ bool protected_mode;
+
+ /*
+ * true when GPU is transitioning into or out of protected mode
+ */
+ bool protected_mode_transition;
+
+ /*
+ * true if protected mode is supported
+ */
+ bool protected_mode_support;
+
+
+#ifdef CONFIG_MALI_DEBUG
+ wait_queue_head_t driver_inactive_wait;
+ bool driver_inactive;
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ /*
+ * Bus logger integration.
+ */
+ struct bus_logger_client *buslogger;
+#endif
+ /* Boolean indicating if an IRQ flush during reset is in progress. */
+ bool irq_reset_flush;
+
+ /* list of inited sub systems. Used during terminate/error recovery */
+ u32 inited_subsys;
+
+ spinlock_t hwaccess_lock;
+
+ /* Protects access to MMU operations */
+ struct mutex mmu_hw_mutex;
+
+ /* Current serialization mode. See KBASE_SERIALIZE_* for details */
+ u8 serialize_jobs;
+
+ /* gpu virtual id */
+ u32 gpu_vid;
+
+ unsigned long hi_features_mask[2];
+};
+
+/**
+ * struct jsctx_queue - JS context atom queue
+ * @runnable_tree: Root of RB-tree containing currently runnable atoms on this
+ * job slot.
+ * @x_dep_head: Head item of the linked list of atoms blocked on cross-slot
+ * dependencies. Atoms on this list will be moved to the
+ * runnable_tree when the blocking atom completes.
+ *
+ * hwaccess_lock must be held when accessing this structure.
+ */
+struct jsctx_queue {
+ struct rb_root runnable_tree;
+ struct list_head x_dep_head;
+};
+
+
+#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \
+ (((minor) & 0xFFF) << 8) | \
+ ((0 & 0xFF) << 0))
+
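+/*
+ * Illustrative sketch (these helpers are not part of the original driver):
+ * KBASE_API_VERSION() packs the major number into bits 20-31, the minor
+ * number into bits 8-19 and a zero patch level into bits 0-7, so e.g.
+ * KBASE_API_VERSION(11, 4) == 0x00b00400.
+ */
+static inline unsigned int kbase_api_version_major(unsigned long api_version)
+{
+ return (api_version >> 20) & 0xFFF;
+}
+
+static inline unsigned int kbase_api_version_minor(unsigned long api_version)
+{
+ return (api_version >> 8) & 0xFFF;
+}
+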
+/**
+ * enum kbase_context_flags - Flags for kbase contexts
+ *
+ * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
+ * process on a 64-bit kernel.
+ *
+ * @KCTX_RUNNABLE_REF: Set when context is counted in
+ * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
+ *
+ * @KCTX_ACTIVE: Set when the context is active.
+ *
+ * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
+ * context.
+ *
+ * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
+ * initialized.
+ *
+ * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
+ * allocations. Existing allocations will not change.
+ *
+ * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
+ *
+ * @KCTX_PRIVILEGED: Set if the context uses an address space and should be kept
+ * scheduled in.
+ *
+ * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
+ * This is only ever updated whilst the jsctx_mutex is held.
+ *
+ * @KCTX_DYING: Set when the context process is in the process of being evicted.
+ *
+ * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
+ * context, to disable use of implicit dma-buf fences. This is used to avoid
+ * potential synchronization deadlocks.
+ *
+ * All members need to be separate bits. This enum is intended for use in a
+ * bitmask where multiple values get OR-ed together.
+ */
+enum kbase_context_flags {
+ KCTX_COMPAT = 1U << 0,
+ KCTX_RUNNABLE_REF = 1U << 1,
+ KCTX_ACTIVE = 1U << 2,
+ KCTX_PULLED = 1U << 3,
+ KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
+ KCTX_INFINITE_CACHE = 1U << 5,
+ KCTX_SUBMIT_DISABLED = 1U << 6,
+ KCTX_PRIVILEGED = 1U << 7,
+ KCTX_SCHEDULED = 1U << 8,
+ KCTX_DYING = 1U << 9,
+ KCTX_NO_IMPLICIT_SYNC = 1U << 10,
+};
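+
+/*
+ * Illustrative sketch (not part of the original driver): every
+ * kbase_context_flags value is a distinct bit, so a context's flags can be
+ * held in a single atomic word and tested or set without taking a lock.
+ */
+static inline bool kbase_ctx_flags_test(atomic_t *flags,
+ enum kbase_context_flags flag)
+{
+ return (atomic_read(flags) & flag) != 0;
+}
+
+static inline void kbase_ctx_flags_set(atomic_t *flags,
+ enum kbase_context_flags flag)
+{
+ /* atomic_or() leaves the other bits untouched under concurrency */
+ atomic_or(flag, flags);
+}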
+
+struct kbase_sub_alloc {
+ struct list_head link;
+ struct page *page;
+ DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
+};
+
+struct kbase_context {
+ struct file *filp;
+ struct kbase_device *kbdev;
+ int id; /* System wide unique id */
+ unsigned long api_version;
+ phys_addr_t pgd;
+ struct list_head event_list;
+ struct list_head event_coalesce_list;
+ struct mutex event_mutex;
+ atomic_t event_closed;
+ struct workqueue_struct *event_workq;
+ atomic_t event_count;
+ int event_coalesce_count;
+
+ atomic_t flags;
+
+ atomic_t setup_complete;
+ atomic_t setup_in_progress;
+
+ u64 *mmu_teardown_pages;
+
+ struct tagged_addr aliasing_sink_page;
+
+ struct mutex mem_partials_lock;
+ struct list_head mem_partials;
+
+ struct mutex mmu_lock;
+ struct mutex reg_lock; /* To be converted to a rwlock? */
+ struct rb_root reg_rbtree_same; /* RB tree of GPU (live) regions,
+ * SAME_VA zone */
+ struct rb_root reg_rbtree_exec; /* RB tree of GPU (live) regions,
+ * EXEC zone */
+ struct rb_root reg_rbtree_custom; /* RB tree of GPU (live) regions,
+ * CUSTOM_VA zone */
+
+ unsigned long cookies;
+ struct kbase_va_region *pending_regions[BITS_PER_LONG];
+
+ wait_queue_head_t event_queue;
+ pid_t tgid;
+ pid_t pid;
+
+ struct kbase_jd_context jctx;
+ atomic_t used_pages;
+ atomic_t nonmapped_pages;
+
+ struct kbase_mem_pool mem_pool;
+ struct kbase_mem_pool lp_mem_pool;
+
+ struct shrinker reclaim;
+ struct list_head evict_list;
+
+ struct list_head waiting_soft_jobs;
+ spinlock_t waiting_soft_jobs_lock;
+#ifdef CONFIG_KDS
+ struct list_head waiting_kds_resource;
+#endif
+#ifdef CONFIG_MALI_DMA_FENCE
+ struct {
+ struct list_head waiting_resource;
+ struct workqueue_struct *wq;
+ } dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE */
+ /** This is effectively part of the Run Pool, because it only has a valid
+ * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
+ *
+ * The hwaccess_lock must be held whilst accessing this.
+ *
+ * If the context relating to this as_nr is required, you must use
+ * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
+ * whilst you're using it. Alternatively, just hold the hwaccess_lock
+ * to ensure the context doesn't disappear (but this has restrictions on what other locks
+ * you can take whilst doing this) */
+ int as_nr;
+
+ /* Keeps track of the number of users of this context. A user can be a
+ * job that is available for execution, instrumentation needing to 'pin'
+ * a context for counter collection, etc. If the refcount reaches 0 then
+ * this context is considered inactive and the previously programmed
+ * AS might be cleared at any point.
+ */
+ atomic_t refcount;
+
+ /* NOTE:
+ *
+ * Flags are in jctx.sched_info.ctx.flags
+ * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
+ *
+ * All other flags must be added there */
+ spinlock_t mm_update_lock;
+ struct mm_struct *process_mm;
+ /* End of the SAME_VA zone */
+ u64 same_va_end;
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ struct kbase_trace_kctx_timeline timeline;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ /* Content of mem_profile file */
+ char *mem_profile_data;
+ /* Size of @c mem_profile_data */
+ size_t mem_profile_size;
+ /* Mutex guarding memory profile state */
+ struct mutex mem_profile_lock;
+ /* Memory profile directory under debugfs */
+ struct dentry *kctx_dentry;
+
+ /* for job fault debug */
+ unsigned int *reg_dump;
+ atomic_t job_fault_count;
+ /* This list will keep the following atoms during the dump
+ * in the same context
+ */
+ struct list_head job_fault_resume_event_list;
+
+#endif /* CONFIG_DEBUG_FS */
+
+ struct jsctx_queue jsctx_queue
+ [KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
+
+ /* Number of atoms currently pulled from this context */
+ atomic_t atoms_pulled;
+ /* Number of atoms currently pulled from this context, per slot */
+ atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
+ /* Number of atoms currently pulled from this context, per slot and
+ * priority. Hold hwaccess_lock when accessing */
+ int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][
+ KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+ /* true if slot is blocked on the given priority. This will be set on a
+ * soft-stop */
+ bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+ /* Bitmask of slots that can be pulled from */
+ u32 slots_pullable;
+
+ /* Backend specific data */
+ struct kbase_context_backend backend;
+
+ /* Work structure used for deferred ASID assignment */
+ struct work_struct work;
+
+ /* Only one userspace vinstr client per kbase context */
+ struct kbase_vinstr_client *vinstr_cli;
+ struct mutex vinstr_cli_lock;
+
+ /* List of completed jobs waiting for events to be posted */
+ struct list_head completed_jobs;
+ /* Number of work items currently pending on job_done_wq */
+ atomic_t work_count;
+
+ /* Waiting soft-jobs will fail when this timer expires */
+ struct timer_list soft_job_timeout;
+
+ /* JIT allocation management */
+ struct kbase_va_region *jit_alloc[256];
+ struct list_head jit_active_head;
+ struct list_head jit_pool_head;
+ struct list_head jit_destroy_head;
+ struct mutex jit_evict_lock;
+ struct work_struct jit_work;
+
+ /* A list of the JIT soft-jobs in submission order
+ * (protected by kbase_jd_context.lock)
+ */
+ struct list_head jit_atoms_head;
+ /* A list of pending JIT alloc soft-jobs (using the 'queue' list_head)
+ * (protected by kbase_jd_context.lock)
+ */
+ struct list_head jit_pending_alloc;
+
+ /* External sticky resource management */
+ struct list_head ext_res_meta_head;
+
+ /* Used to record that a drain was requested from atomic context */
+ atomic_t drain_pending;
+
+ /* Current age count, used to determine age for newly submitted atoms */
+ u32 age_count;
+};
+
+/**
+ * struct kbase_ctx_ext_res_meta - Structure which binds an external resource
+ * to a @kbase_context.
+ * @ext_res_node: List head for adding the metadata to a
+ * @kbase_context.
+ * @alloc: The physical memory allocation structure
+ * which is mapped.
+ * @gpu_addr: The GPU virtual address the resource is
+ * mapped to.
+ *
+ * External resources can be mapped into multiple contexts as well as the same
+ * context multiple times.
+ * As kbase_va_region itself isn't refcounted we can't attach our extra
+ * information to it as it could be removed under our feet leaving external
+ * resources pinned.
+ * This metadata structure binds a single external resource to a single
+ * context, ensuring that per context mapping is tracked separately so it can
+ * be overridden when needed and abuses by the application (freeing the resource
+ * multiple times) don't affect the refcount of the physical allocation.
+ */
+struct kbase_ctx_ext_res_meta {
+ struct list_head ext_res_node;
+ struct kbase_mem_phy_alloc *alloc;
+ u64 gpu_addr;
+};
+
+enum kbase_reg_access_type {
+ REG_READ,
+ REG_WRITE
+};
+
+enum kbase_share_attr_bits {
+ /* (1ULL << 8) bit is reserved */
+ SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
+ SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
+};
+
+/**
+ * kbase_device_is_cpu_coherent - Returns if the device is CPU coherent.
+ * @kbdev: kbase device
+ *
+ * Return: true if the device accesses are coherent, false if not.
+ */
+static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
+{
+ if ((kbdev->system_coherency == COHERENCY_ACE_LITE) ||
+ (kbdev->system_coherency == COHERENCY_ACE))
+ return true;
+
+ return false;
+}
+
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
+
+/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
+#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
+/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
+#define KBASE_AS_INACTIVE_MAX_LOOPS 100000
+
+/* Maximum number of times a job can be replayed */
+#define BASEP_JD_REPLAY_LIMIT 15
+
+/* JobDescriptorHeader - taken from the architecture specifications, the layout
+ * is currently identical for all GPU archs. */
+struct job_descriptor_header {
+ u32 exception_status;
+ u32 first_incomplete_task;
+ u64 fault_pointer;
+ u8 job_descriptor_size : 1;
+ u8 job_type : 7;
+ u8 job_barrier : 1;
+ u8 _reserved_01 : 1;
+ u8 _reserved_1 : 1;
+ u8 _reserved_02 : 1;
+ u8 _reserved_03 : 1;
+ u8 _reserved_2 : 1;
+ u8 _reserved_04 : 1;
+ u8 _reserved_05 : 1;
+ u16 job_index;
+ u16 job_dependency_index_1;
+ u16 job_dependency_index_2;
+ union {
+ u64 _64;
+ u32 _32;
+ } next_job;
+};
+
+#endif /* _KBASE_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_device.c b/drivers/gpu/arm_gpu/mali_kbase_device.c
new file mode 100644
index 000000000000..d635fccaea14
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_device.c
@@ -0,0 +1,674 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel device APIs
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <mali_kbase_profiling_gator_api.h>
+
+/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
+ * Supports tracing feature provided in the base module.
+ * Please keep it in sync with the value of base module.
+ */
+#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
+
+#if KBASE_TRACE_ENABLE
+static const char *kbasep_trace_code_string[] = {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ARRAY */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif
+
+#define DEBUG_MESSAGE_SIZE 256
+
+static int kbasep_trace_init(struct kbase_device *kbdev);
+static void kbasep_trace_term(struct kbase_device *kbdev);
+static void kbasep_trace_hook_wrapper(void *param);
+
+struct kbase_device *kbase_device_alloc(void)
+{
+ return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
+}
+
+static int kbase_device_as_init(struct kbase_device *kbdev, int i)
+{
+ const char format[] = "mali_mmu%d";
+ char name[sizeof(format)];
+ const char poke_format[] = "mali_mmu%d_poker";
+ char poke_name[sizeof(poke_format)];
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ snprintf(poke_name, sizeof(poke_name), poke_format, i);
+
+ snprintf(name, sizeof(name), format, i);
+
+ kbdev->as[i].number = i;
+ kbdev->as[i].fault_addr = 0ULL;
+
+ kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
+ if (!kbdev->as[i].pf_wq)
+ return -EINVAL;
+
+ INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
+ INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
+ struct work_struct *poke_work = &kbdev->as[i].poke_work;
+
+ kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
+ if (!kbdev->as[i].poke_wq) {
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ return -EINVAL;
+ }
+ KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
+ INIT_WORK(poke_work, kbasep_as_do_poke);
+
+ hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ poke_timer->function = kbasep_as_poke_timer_callback;
+
+ kbdev->as[i].poke_refcount = 0;
+ kbdev->as[i].poke_state = 0u;
+ }
+
+ return 0;
+}
+
+static void kbase_device_as_term(struct kbase_device *kbdev, int i)
+{
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ destroy_workqueue(kbdev->as[i].poke_wq);
+}
+
+static int kbase_device_all_as_init(struct kbase_device *kbdev)
+{
+ int i, err;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ err = kbase_device_as_init(kbdev, i);
+ if (err)
+ goto free_workqs;
+ }
+
+ return 0;
+
+free_workqs:
+ /* Terminate only the address spaces that were successfully initialised */
+ while (i-- > 0)
+ kbase_device_as_term(kbdev, i);
+
+ return err;
+}
+
+static void kbase_device_all_as_term(struct kbase_device *kbdev)
+{
+ int i;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
+ kbase_device_as_term(kbdev, i);
+}
+
+int kbase_device_init(struct kbase_device * const kbdev)
+{
+ int i, err;
+#ifdef CONFIG_ARM64
+ struct device_node *np = NULL;
+#endif /* CONFIG_ARM64 */
+
+ spin_lock_init(&kbdev->mmu_mask_change);
+ mutex_init(&kbdev->mmu_hw_mutex);
+#ifdef CONFIG_ARM64
+ kbdev->cci_snoop_enabled = false;
+ np = kbdev->dev->of_node;
+ if (np != NULL) {
+ if (of_property_read_u32(np, "snoop_enable_smc",
+ &kbdev->snoop_enable_smc))
+ kbdev->snoop_enable_smc = 0;
+ if (of_property_read_u32(np, "snoop_disable_smc",
+ &kbdev->snoop_disable_smc))
+ kbdev->snoop_disable_smc = 0;
+ /* Either both or none of the calls should be provided. */
+ if (!((kbdev->snoop_disable_smc == 0
+ && kbdev->snoop_enable_smc == 0)
+ || (kbdev->snoop_disable_smc != 0
+ && kbdev->snoop_enable_smc != 0))) {
+ WARN_ON(1);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+#endif /* CONFIG_ARM64 */
+ /* Get the list of workarounds for issues on the current HW
+ * (identified by the GPU_ID register)
+ */
+ err = kbase_hw_set_issues_mask(kbdev);
+ if (err)
+ goto fail;
+
+ /* Set the list of features available on the current HW
+ * (identified by the GPU_ID register)
+ */
+ kbase_hw_set_features_mask(kbdev);
+
+ kbase_gpuprops_set_features(kbdev);
+
+ /* On Linux 4.0+, dma coherency is determined from device tree */
+#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+ set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
+#endif
+
+ /* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
+ * device structure was created by device-tree
+ */
+ if (!kbdev->dev->dma_mask)
+ kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;
+
+ err = dma_set_mask(kbdev->dev,
+ DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+ if (err)
+ goto dma_set_mask_failed;
+
+ err = dma_set_coherent_mask(kbdev->dev,
+ DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+ if (err)
+ goto dma_set_mask_failed;
+
+ kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
+
+ err = kbase_device_all_as_init(kbdev);
+ if (err)
+ goto as_init_failed;
+
+ spin_lock_init(&kbdev->hwcnt.lock);
+
+ err = kbasep_trace_init(kbdev);
+ if (err)
+ goto term_as;
+
+ mutex_init(&kbdev->cacheclean_lock);
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+ kbdev->timeline.slot_atoms_submitted[i] = 0;
+
+ for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
+ atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+ /* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
+ for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
+ kbdev->kbase_profiling_controls[i] = 0;
+
+ kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);
+
+ atomic_set(&kbdev->ctx_num, 0);
+
+ err = kbase_instr_backend_init(kbdev);
+ if (err)
+ goto term_trace;
+
+ kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;
+
+ kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
+ else
+ kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
+
+#ifdef CONFIG_MALI_DEBUG
+ init_waitqueue_head(&kbdev->driver_inactive_wait);
+#endif /* CONFIG_MALI_DEBUG */
+
+ return 0;
+term_trace:
+ kbasep_trace_term(kbdev);
+term_as:
+ kbase_device_all_as_term(kbdev);
+as_init_failed:
+dma_set_mask_failed:
+fail:
+ return err;
+}
+
+void kbase_device_term(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+#if KBASE_TRACE_ENABLE
+ kbase_debug_assert_register_hook(NULL, NULL);
+#endif
+
+ kbase_instr_backend_term(kbdev);
+
+ kbasep_trace_term(kbdev);
+
+ kbase_device_all_as_term(kbdev);
+}
+
+void kbase_device_free(struct kbase_device *kbdev)
+{
+ kfree(kbdev);
+}
+
+int kbase_device_trace_buffer_install(
+ struct kbase_context *kctx, u32 *tb, size_t size)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(tb);
+
+ /* Interface uses 16-bit value to track last accessed entry. Each entry
+ * is composed of two 32-bit words.
+ * This limits the size that can be handled without an overflow. */
+ if (0xFFFF * (2 * sizeof(u32)) < size)
+ return -EINVAL;
+
+ /* set up the header */
+ /* magic number in the first 4 bytes */
+ tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
+ /* Store (write offset = 0, wrap counter = 0, transaction active = no)
+ * write offset 0 means never written.
+ * Offsets 1 to (wrap_offset - 1) used to store values when trace started
+ */
+ tb[1] = 0;
+
+ /* install trace buffer */
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb_wrap_offset = size / 8;
+ kctx->jctx.tb = tb;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+
+ return 0;
+}
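+
+/*
+ * Illustrative sketch (not part of the original driver): unpacking the trace
+ * buffer header word installed above. Bit 0 flags a transaction in progress,
+ * bits 1-15 hold the wrap counter and bits 16-31 the write offset (0 meaning
+ * nothing has been written yet).
+ */
+static inline void kbase_device_trace_decode_header(u32 header_word,
+ u16 *write_offset, u16 *wrap_count, bool *in_transaction)
+{
+ *in_transaction = header_word & 0x1;
+ *wrap_count = (header_word >> 1) & 0x7FFF;
+ *write_offset = (header_word >> 16) & 0xFFFF;
+}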
+
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb = NULL;
+ kctx->jctx.tb_wrap_offset = 0;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
+
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ if (kctx->jctx.tb) {
+ u16 wrap_count;
+ u16 write_offset;
+ u32 *tb = kctx->jctx.tb;
+ u32 header_word;
+
+ header_word = tb[1];
+ KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));
+
+ wrap_count = (header_word >> 1) & 0x7FFF;
+ write_offset = (header_word >> 16) & 0xFFFF;
+
+ /* mark as transaction in progress */
+ tb[1] |= 0x1;
+ mb();
+
+ /* calculate new offset */
+ write_offset++;
+ if (write_offset == kctx->jctx.tb_wrap_offset) {
+ /* wrap */
+ write_offset = 1;
+ wrap_count++;
+ wrap_count &= 0x7FFF; /* 15bit wrap counter */
+ }
+
+ /* store the trace entry at the selected offset */
+ tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
+ tb[write_offset * 2 + 1] = reg_value;
+ mb();
+
+ /* new header word */
+ header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */
+ tb[1] = header_word;
+ }
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
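+
+/*
+ * Illustrative sketch (not part of the original driver): decoding one trace
+ * entry written above. Bit 0 of the first word carries the access type
+ * (1 = write, 0 = read); the remaining bits hold the 4-byte-aligned register
+ * offset, and the second word holds the register value.
+ */
+static inline void kbase_device_trace_decode_entry(const u32 *entry,
+ u16 *reg_offset, u32 *reg_value, bool *is_write)
+{
+ *is_write = entry[0] & 0x1;
+ *reg_offset = (u16)(entry[0] & ~0x3u);
+ *reg_value = entry[1];
+}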
+
+/*
+ * Device trace functions
+ */
+#if KBASE_TRACE_ENABLE
+
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+ struct kbase_trace *rbuf;
+
+ rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
+
+ if (!rbuf)
+ return -EINVAL;
+
+ kbdev->trace_rbuf = rbuf;
+ spin_lock_init(&kbdev->trace_lock);
+ return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+ kfree(kbdev->trace_rbuf);
+}
+
+static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
+{
+ s32 written = 0;
+
+ /* Initial part of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);
+
+ if (trace_msg->katom)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);
+
+ /* NOTE: Could add function callbacks to handle different message types */
+ /* Jobslot present */
+ if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Refcount present */
+ if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Rest of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
+}
+
+static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
+{
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ dev_dbg(kbdev->dev, "%s", buffer);
+}
+
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+{
+ unsigned long irqflags;
+ struct kbase_trace *trace_msg;
+
+ spin_lock_irqsave(&kbdev->trace_lock, irqflags);
+
+ trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];
+
+ /* Fill the message */
+ trace_msg->thread_id = task_pid_nr(current);
+ trace_msg->cpu = task_cpu(current);
+
+ getnstimeofday(&trace_msg->timestamp);
+
+ trace_msg->code = code;
+ trace_msg->ctx = ctx;
+
+ if (NULL == katom) {
+ trace_msg->katom = false;
+ } else {
+ trace_msg->katom = true;
+ trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
+ trace_msg->atom_udata[0] = katom->udata.blob[0];
+ trace_msg->atom_udata[1] = katom->udata.blob[1];
+ }
+
+ trace_msg->gpu_addr = gpu_addr;
+ trace_msg->jobslot = jobslot;
+ trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
+ trace_msg->info_val = info_val;
+ trace_msg->flags = flags;
+
+ /* Update the ringbuffer indices */
+ kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
+ if (kbdev->trace_next_in == kbdev->trace_first_out)
+ kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
+
+ /* Done */
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
+}
+
+void kbasep_trace_clear(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ kbdev->trace_first_out = kbdev->trace_next_in;
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ u32 start;
+ u32 end;
+
+ dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ start = kbdev->trace_first_out;
+ end = kbdev->trace_next_in;
+
+ while (start != end) {
+ struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+
+ kbasep_trace_dump_msg(kbdev, trace_msg);
+
+ start = (start + 1) & KBASE_TRACE_MASK;
+ }
+ dev_dbg(kbdev->dev, "TRACE_END");
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ KBASE_TRACE_CLEAR(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)param;
+
+ kbasep_trace_dump(kbdev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct trace_seq_state {
+ struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
+ u32 start;
+ u32 end;
+};
+
+static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ if (*pos > KBASE_TRACE_SIZE)
+ return NULL;
+ i = state->start + *pos;
+ if ((state->end >= state->start && i >= state->end) ||
+ i >= state->end + KBASE_TRACE_SIZE)
+ return NULL;
+
+ i &= KBASE_TRACE_MASK;
+
+ return &state->trace_buf[i];
+}
+
+static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ (*pos)++;
+
+ i = (state->start + *pos) & KBASE_TRACE_MASK;
+ if (i == state->end)
+ return NULL;
+
+ return &state->trace_buf[i];
+}
+
+static int kbasep_trace_seq_show(struct seq_file *s, void *data)
+{
+ struct kbase_trace *trace_msg = data;
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ seq_printf(s, "%s\n", buffer);
+ return 0;
+}
+
+static const struct seq_operations kbasep_trace_seq_ops = {
+ .start = kbasep_trace_seq_start,
+ .next = kbasep_trace_seq_next,
+ .stop = kbasep_trace_seq_stop,
+ .show = kbasep_trace_seq_show,
+};
+
+static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct kbase_device *kbdev = inode->i_private;
+ unsigned long flags;
+
+ struct trace_seq_state *state;
+
+ state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ state->start = kbdev->trace_first_out;
+ state->end = kbdev->trace_next_in;
+ memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ return 0;
+}
+
+static const struct file_operations kbasep_trace_debugfs_fops = {
+ .open = kbasep_trace_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("mali_trace", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_trace_debugfs_fops);
+}
+
+#else
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#else /* KBASE_TRACE_ENABLE */
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+ return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+ CSTD_UNUSED(param);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
+{
+ switch (control) {
+ case FBDUMP_CONTROL_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RATE:
+ /* fall through */
+ case SW_COUNTER_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ kbdev->kbase_profiling_controls[control] = value;
+ break;
+ default:
+ dev_err(kbdev->dev, "Profiling control %d not found\n", control);
+ break;
+ }
+}
+
+/*
+ * Called by gator to control the production of
+ * profiling information at runtime
+ */
+
+void _mali_profiling_control(u32 action, u32 value)
+{
+ struct kbase_device *kbdev = NULL;
+
+ /* find the first device, i.e. call with -1 */
+ kbdev = kbase_find_device(-1);
+
+ if (NULL != kbdev)
+ kbase_set_profiling_control(kbdev, action, value);
+}
+KBASE_EXPORT_SYMBOL(_mali_profiling_control);
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_disjoint_events.c b/drivers/gpu/arm_gpu/mali_kbase_disjoint_events.c
new file mode 100644
index 000000000000..f70bcccf4050
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_disjoint_events.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel disjoint events helper functions
+ */
+
+#include <mali_kbase.h>
+
+void kbase_disjoint_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_set(&kbdev->disjoint_event.count, 0);
+ atomic_set(&kbdev->disjoint_event.state, 0);
+}
+
+/* increment the disjoint event count */
+void kbase_disjoint_event(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_inc(&kbdev->disjoint_event.count);
+}
+
+/* increment the state and the event counter */
+void kbase_disjoint_state_up(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_inc(&kbdev->disjoint_event.state);
+
+ kbase_disjoint_event(kbdev);
+}
+
+/* decrement the state */
+void kbase_disjoint_state_down(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0);
+
+ kbase_disjoint_event(kbdev);
+
+ atomic_dec(&kbdev->disjoint_event.state);
+}
+
+/* increments the count only if the state is > 0 */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ if (atomic_read(&kbdev->disjoint_event.state))
+ kbase_disjoint_event(kbdev);
+}
+
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return atomic_read(&kbdev->disjoint_event.count);
+}
+KBASE_EXPORT_TEST_API(kbase_disjoint_event_get);
diff --git a/drivers/gpu/arm_gpu/mali_kbase_dma_fence.c b/drivers/gpu/arm_gpu/mali_kbase_dma_fence.c
new file mode 100644
index 000000000000..9197743c81d4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_dma_fence.c
@@ -0,0 +1,449 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
+ * it will be set there.
+ */
+#include "mali_kbase_dma_fence.h"
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/ww_mutex.h>
+
+#include <mali_kbase.h>
+
+static void
+kbase_dma_fence_work(struct work_struct *pwork);
+
+static void
+kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
+}
+
+static void
+kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
+{
+ list_del(&katom->queue);
+}
+
+static int
+kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
+ struct ww_acquire_ctx *ctx)
+{
+ struct reservation_object *content_res = NULL;
+ unsigned int content_res_idx = 0;
+ unsigned int r;
+ int err = 0;
+
+ ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+ for (r = 0; r < info->dma_fence_resv_count; r++) {
+ if (info->resv_objs[r] == content_res) {
+ content_res = NULL;
+ continue;
+ }
+
+ err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
+ if (err)
+ goto error;
+ }
+
+ ww_acquire_done(ctx);
+ return err;
+
+error:
+ content_res_idx = r;
+
+ /* Unlock the ones we already locked */
+ while (r--)
+ ww_mutex_unlock(&info->resv_objs[r]->lock);
+
+ if (content_res)
+ ww_mutex_unlock(&content_res->lock);
+
+ /* If we deadlock try with lock_slow and retry */
+ if (err == -EDEADLK) {
+ content_res = info->resv_objs[content_res_idx];
+ ww_mutex_lock_slow(&content_res->lock, ctx);
+ goto retry;
+ }
+
+ /* If we are here the function failed */
+ ww_acquire_fini(ctx);
+ return err;
+}
+
+static void
+kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
+ struct ww_acquire_ctx *ctx)
+{
+ unsigned int r;
+
+ for (r = 0; r < info->dma_fence_resv_count; r++)
+ ww_mutex_unlock(&info->resv_objs[r]->lock);
+ ww_acquire_fini(ctx);
+}
+
+/**
+ * kbase_dma_fence_queue_work() - Queue work to handle @katom
+ * @katom: Pointer to atom for which to queue work
+ *
+ * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
+ * submit the atom.
+ */
+static void
+kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ bool ret;
+
+ INIT_WORK(&katom->work, kbase_dma_fence_work);
+ ret = queue_work(kctx->dma_fence.wq, &katom->work);
+ /* Warn if work was already queued, that should not happen. */
+ WARN_ON(!ret);
+}
+
+/**
+ * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
+ * @katom: Katom to cancel
+ *
+ * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
+ */
+static void
+kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ /* Cancel callbacks and clean up. */
+ kbase_fence_free_callbacks(katom);
+
+ /* Mark the atom as handled in case all fences signaled just before
+ * canceling the callbacks and the worker was queued.
+ */
+ kbase_fence_dep_count_set(katom, -1);
+
+ /* Prevent job_done_nolock from being called twice on an atom when
+ * there is a race between job completion and cancellation.
+ */
+
+ if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+ /* Wait was cancelled - zap the atom */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+ }
+}
+
+/**
+ * kbase_dma_fence_work() - Worker thread called when a fence is signaled
+ * @pwork: work_struct containing a pointer to a katom
+ *
+ * This function will clean and mark all dependencies as satisfied
+ */
+static void
+kbase_dma_fence_work(struct work_struct *pwork)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_context *ctx;
+
+ katom = container_of(pwork, struct kbase_jd_atom, work);
+ ctx = &katom->kctx->jctx;
+
+ mutex_lock(&ctx->lock);
+ if (kbase_fence_dep_count_read(katom) != 0)
+ goto out;
+
+ kbase_fence_dep_count_set(katom, -1);
+
+ /* Remove atom from list of dma-fence waiting atoms. */
+ kbase_dma_fence_waiters_remove(katom);
+ /* Cleanup callbacks. */
+ kbase_fence_free_callbacks(katom);
+ /*
+ * Queue atom on GPU, unless it has already completed due to a failing
+ * dependency. Run jd_done_nolock() on the katom if it is completed.
+ */
+ if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
+ jd_done_nolock(katom, NULL);
+ else
+ kbase_jd_dep_clear_locked(katom);
+
+out:
+ mutex_unlock(&ctx->lock);
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
+#else
+kbase_dma_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+#endif
+{
+ struct kbase_fence_cb *kcb = container_of(cb,
+ struct kbase_fence_cb,
+ fence_cb);
+ struct kbase_jd_atom *katom = kcb->katom;
+
+ /* If the atom is zapped, dep_count will be forced to a negative number,
+ * preventing this callback from ever scheduling work that would otherwise
+ * re-queue the atom.
+ */
+
+ if (kbase_fence_dep_count_dec_and_test(katom))
+ kbase_dma_fence_queue_work(katom);
+}
+
+static int
+kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
+ struct reservation_object *resv,
+ bool exclusive)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *excl_fence = NULL;
+ struct fence **shared_fences = NULL;
+#else
+ struct dma_fence *excl_fence = NULL;
+ struct dma_fence **shared_fences = NULL;
+#endif
+ unsigned int shared_count = 0;
+ int err, i;
+
+ err = reservation_object_get_fences_rcu(resv,
+ &excl_fence,
+ &shared_count,
+ &shared_fences);
+ if (err)
+ return err;
+
+ if (excl_fence) {
+ err = kbase_fence_add_callback(katom,
+ excl_fence,
+ kbase_dma_fence_cb);
+
+ /* Release our reference, taken by reservation_object_get_fences_rcu(),
+ * to the fence. We have set up our callback (if that was possible), and
+ * it is the fence owner's responsibility to signal the fence before
+ * allowing it to disappear.
+ */
+ dma_fence_put(excl_fence);
+
+ if (err)
+ goto out;
+ }
+
+ if (exclusive) {
+ for (i = 0; i < shared_count; i++) {
+ err = kbase_fence_add_callback(katom,
+ shared_fences[i],
+ kbase_dma_fence_cb);
+ if (err)
+ goto out;
+ }
+ }
+
+ /* Release all our references to the shared fences, taken by
+ * reservation_object_get_fences_rcu(). We have set up our callbacks (if
+ * that was possible), and it is the fence owner's responsibility to
+ * signal the fences before allowing them to disappear.
+ */
+out:
+ for (i = 0; i < shared_count; i++)
+ dma_fence_put(shared_fences[i]);
+ kfree(shared_fences);
+
+ if (err) {
+ /*
+ * On error, cancel and clean up all callbacks that were set up
+ * before the error.
+ */
+ kbase_fence_free_callbacks(katom);
+ }
+
+ return err;
+}
+
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+ struct kbase_dma_fence_resv_info *info,
+ bool exclusive)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->dma_fence_resv_count; i++) {
+ /* Duplicate resource, ignore */
+ if (info->resv_objs[i] == resv)
+ return;
+ }
+
+ info->resv_objs[info->dma_fence_resv_count] = resv;
+ if (exclusive)
+ set_bit(info->dma_fence_resv_count,
+ info->dma_fence_excl_bitmap);
+ (info->dma_fence_resv_count)++;
+}
+
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+ struct kbase_dma_fence_resv_info *info)
+{
+ int err, i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+ struct ww_acquire_ctx ww_ctx;
+
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ fence = kbase_fence_out_new(katom);
+ if (!fence) {
+ err = -ENOMEM;
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d creating fence.\n", err);
+ return err;
+ }
+
+ kbase_fence_dep_count_set(katom, 1);
+
+ err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d locking reservations.\n", err);
+ kbase_fence_dep_count_set(katom, -1);
+ kbase_fence_out_remove(katom);
+ return err;
+ }
+
+ for (i = 0; i < info->dma_fence_resv_count; i++) {
+ struct reservation_object *obj = info->resv_objs[i];
+
+ if (!test_bit(i, info->dma_fence_excl_bitmap)) {
+ err = reservation_object_reserve_shared(obj);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d reserving space for shared fence.\n", err);
+ goto end;
+ }
+
+ err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d adding reservation to callback.\n", err);
+ goto end;
+ }
+
+ reservation_object_add_shared_fence(obj, fence);
+ } else {
+ err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d adding reservation to callback.\n", err);
+ goto end;
+ }
+
+ reservation_object_add_excl_fence(obj, fence);
+ }
+ }
+
+end:
+ kbase_dma_fence_unlock_reservations(info, &ww_ctx);
+
+ if (likely(!err)) {
+ /* Test if the callbacks are already triggered */
+ if (kbase_fence_dep_count_dec_and_test(katom)) {
+ kbase_fence_dep_count_set(katom, -1);
+ kbase_fence_free_callbacks(katom);
+ } else {
+ /* Add katom to the list of dma-buf fence waiting atoms
+ * only if it is still waiting.
+ */
+ kbase_dma_fence_waiters_add(katom);
+ }
+ } else {
+ /* There was an error, cancel callbacks, set dep_count to -1 to
+ * indicate that the atom has been handled (the caller will
+ * kill it for us), signal the fence, free callbacks and the
+ * fence.
+ */
+ kbase_fence_free_callbacks(katom);
+ kbase_fence_dep_count_set(katom, -1);
+ kbase_dma_fence_signal(katom);
+ }
+
+ return err;
+}
+
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
+{
+ struct list_head *list = &kctx->dma_fence.waiting_resource;
+
+ while (!list_empty(list)) {
+ struct kbase_jd_atom *katom;
+
+ katom = list_first_entry(list, struct kbase_jd_atom, queue);
+ kbase_dma_fence_waiters_remove(katom);
+ kbase_dma_fence_cancel_atom(katom);
+ }
+}
+
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
+{
+ /* Cancel callbacks and clean up. */
+ if (kbase_fence_free_callbacks(katom))
+ kbase_dma_fence_queue_work(katom);
+}
+
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
+{
+ if (!katom->dma_fence.fence)
+ return;
+
+ /* Signal the atom's fence. */
+ dma_fence_signal(katom->dma_fence.fence);
+
+ kbase_fence_out_remove(katom);
+
+ kbase_fence_free_callbacks(katom);
+}
+
+void kbase_dma_fence_term(struct kbase_context *kctx)
+{
+ destroy_workqueue(kctx->dma_fence.wq);
+ kctx->dma_fence.wq = NULL;
+}
+
+int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);
+
+ kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
+ WQ_UNBOUND, 1, kctx->pid);
+ if (!kctx->dma_fence.wq)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_dma_fence.h b/drivers/gpu/arm_gpu/mali_kbase_dma_fence.h
new file mode 100644
index 000000000000..c9ab40350422
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_dma_fence.h
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DMA_FENCE_H_
+#define _KBASE_DMA_FENCE_H_
+
+#ifdef CONFIG_MALI_DMA_FENCE
+
+#include <linux/list.h>
+#include <linux/reservation.h>
+#include <mali_kbase_fence.h>
+
+
+/* Forward declaration from mali_kbase_defs.h */
+struct kbase_jd_atom;
+struct kbase_context;
+
+/**
+ * struct kbase_dma_fence_resv_info - Structure with list of reservation objects
+ * @resv_objs: Array of reservation objects to attach the
+ * new fence to.
+ * @dma_fence_resv_count: Number of reservation objects in the array.
+ * @dma_fence_excl_bitmap: Specifies which resv_obj are exclusive.
+ *
+ * This is used by some functions to pass around a collection of data about
+ * reservation objects.
+ */
+struct kbase_dma_fence_resv_info {
+ struct reservation_object **resv_objs;
+ unsigned int dma_fence_resv_count;
+ unsigned long *dma_fence_excl_bitmap;
+};
+
+/**
+ * kbase_dma_fence_add_reservation() - Adds a resv to the array of resv_objs
+ * @resv: Reservation object to add to the array.
+ * @info: Pointer to struct with current reservation info
+ * @exclusive: Boolean indicating if exclusive access is needed
+ *
+ * The function adds a new reservation_object to an existing array of
+ * reservation_objects. At the same time keeps track of which objects require
+ * exclusive access in dma_fence_excl_bitmap.
+ */
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+ struct kbase_dma_fence_resv_info *info,
+ bool exclusive);
+
+/**
+ * kbase_dma_fence_wait() - Creates a new fence and attaches it to the resv_objs
+ * @katom: Katom with the external dependency.
+ * @info: Pointer to struct with current reservation info
+ *
+ * Return: An error code or 0 if succeeds
+ */
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+ struct kbase_dma_fence_resv_info *info);
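+
+/*
+ * Illustrative sketch (this helper is hypothetical, not part of the original
+ * driver): collecting the reservation objects of two external resources, the
+ * second needing exclusive access, and handing them to kbase_dma_fence_wait().
+ * The caller must hold jctx.lock, as kbase_dma_fence_wait() requires.
+ */
+static inline int kbase_dma_fence_wait_two_resvs_example(
+ struct kbase_jd_atom *katom,
+ struct reservation_object *shared_resv,
+ struct reservation_object *excl_resv)
+{
+ struct reservation_object *resvs[2];
+ DECLARE_BITMAP(excl_bitmap, 2) = { 0 };
+ struct kbase_dma_fence_resv_info info = {
+ .resv_objs = resvs,
+ .dma_fence_resv_count = 0,
+ .dma_fence_excl_bitmap = excl_bitmap,
+ };
+
+ kbase_dma_fence_add_reservation(shared_resv, &info, false);
+ kbase_dma_fence_add_reservation(excl_resv, &info, true);
+
+ return kbase_dma_fence_wait(katom, &info);
+}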
+
+/**
+ * kbase_dma_fence_cancel_all_atoms() - Cancel all dma-fence blocked atoms on kctx
+ * @kctx: Pointer to kbase context
+ *
+ * This function will cancel and clean up all katoms on @kctx that are waiting
+ * on dma-buf fences.
+ *
+ * Locking: jctx.lock needs to be held when calling this function.
+ */
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_cancel_callbacks() - Cancel only callbacks on katom
+ * @katom: Pointer to katom whose callbacks are to be canceled
+ *
+ * This function cancels all dma-buf fence callbacks on @katom, but does not
+ * cancel the katom itself.
+ *
+ * The caller is responsible for ensuring that jd_done_nolock is called on
+ * @katom.
+ *
+ * Locking: jctx.lock must be held when calling this function.
+ */
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_signal() - Signal katom's fence and clean up after wait
+ * @katom: Pointer to katom to signal and clean up
+ *
+ * This function will signal the @katom's fence, if it has one, and clean up
+ * the callback data from the katom's wait on earlier fences.
+ *
+ * Locking: jctx.lock must be held while calling this function.
+ */
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_term() - Terminate Mali dma-fence context
+ * @kctx: kbase context to terminate
+ */
+void kbase_dma_fence_term(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_init() - Initialize Mali dma-fence context
+ * @kctx: kbase context to initialize
+ *
+ * Return: 0 on success, or -ENOMEM if the dma-fence workqueue can't be created
+ */
+int kbase_dma_fence_init(struct kbase_context *kctx);
+
+
+#else /* CONFIG_MALI_DMA_FENCE */
+/* Dummy functions for when dma-buf fence isn't enabled. */
+
+static inline int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+ return 0;
+}
+
+static inline void kbase_dma_fence_term(struct kbase_context *kctx) {}
+#endif /* CONFIG_MALI_DMA_FENCE */
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_event.c b/drivers/gpu/arm_gpu/mali_kbase_event.c
new file mode 100644
index 000000000000..188148645f37
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_event.c
@@ -0,0 +1,259 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_debug.h>
+#include <mali_kbase_tlstream.h>
+
+static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct base_jd_udata data;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ data = katom->udata;
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));
+
+ KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx);
+ KBASE_TLSTREAM_TL_DEL_ATOM(katom);
+
+ katom->status = KBASE_JD_ATOM_STATE_UNUSED;
+
+ wake_up(&katom->completed);
+
+ return data;
+}
+
+int kbase_event_pending(struct kbase_context *ctx)
+{
+ KBASE_DEBUG_ASSERT(ctx);
+
+ return (atomic_read(&ctx->event_count) != 0) ||
+ (atomic_read(&ctx->event_closed) != 0);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending);
+
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
+{
+ struct kbase_jd_atom *atom;
+
+ KBASE_DEBUG_ASSERT(ctx);
+
+ mutex_lock(&ctx->event_mutex);
+
+ if (list_empty(&ctx->event_list)) {
+ if (!atomic_read(&ctx->event_closed)) {
+ mutex_unlock(&ctx->event_mutex);
+ return -1;
+ }
+
+ /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
+ mutex_unlock(&ctx->event_mutex);
+ uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
+ memset(&uevent->udata, 0, sizeof(uevent->udata));
+ dev_dbg(ctx->kbdev->dev,
+ "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
+ BASE_JD_EVENT_DRV_TERMINATED);
+ return 0;
+ }
+
+ /* normal event processing */
+ atomic_dec(&ctx->event_count);
+ atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
+ list_del(ctx->event_list.next);
+
+ mutex_unlock(&ctx->event_mutex);
+
+ dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+ uevent->event_code = atom->event_code;
+ uevent->atom_number = (atom - ctx->jctx.atoms);
+
+ if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_free_external_resources(atom);
+
+ mutex_lock(&ctx->jctx.lock);
+ uevent->udata = kbase_event_process(ctx, atom);
+ mutex_unlock(&ctx->jctx.lock);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_dequeue);
+
+/**
+ * kbase_event_process_noreport_worker - Worker for processing atoms that do not
+ * return an event but do have external
+ * resources
+ * @data: Work structure
+ */
+static void kbase_event_process_noreport_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_free_external_resources(katom);
+
+ mutex_lock(&kctx->jctx.lock);
+ kbase_event_process(kctx, katom);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+/**
+ * kbase_event_process_noreport - Process atoms that do not return an event
+ * @kctx: Context pointer
+ * @katom: Atom to be processed
+ *
+ * Atoms that do not have external resources will be processed immediately.
+ * Atoms that do have external resources will be processed on a workqueue, in
+ * order to avoid locking issues.
+ */
+static void kbase_event_process_noreport(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
+ queue_work(kctx->event_workq, &katom->work);
+ } else {
+ kbase_event_process(kctx, katom);
+ }
+}
+
+/**
+ * kbase_event_coalesce - Move pending events to the main event list
+ * @kctx: Context pointer
+ *
+ * kctx->event_list and kctx->event_coalesce_count must be protected
+ * by a lock unless this is the last thread using them
+ * (and the lock is about to be destroyed as the context terminates).
+ *
+ * Return: The number of pending events moved to the main event list
+ */
+static int kbase_event_coalesce(struct kbase_context *kctx)
+{
+ const int event_count = kctx->event_coalesce_count;
+
+ /* Join the list of pending events onto the tail of the main list
+ and reset it */
+ list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
+ kctx->event_coalesce_count = 0;
+
+ /* Return the number of events moved */
+ return event_count;
+}
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
+{
+ if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
+ if (atom->event_code == BASE_JD_EVENT_DONE) {
+ /* Don't report the event */
+ kbase_event_process_noreport(ctx, atom);
+ return;
+ }
+ }
+
+ if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
+ /* Don't report the event */
+ kbase_event_process_noreport(ctx, atom);
+ return;
+ }
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_POSTED);
+ if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
+ /* Don't report the event until other event(s) have completed */
+ mutex_lock(&ctx->event_mutex);
+ list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
+ ++ctx->event_coalesce_count;
+ mutex_unlock(&ctx->event_mutex);
+ } else {
+ /* Report the event and any pending events now */
+ int event_count = 1;
+
+ mutex_lock(&ctx->event_mutex);
+ event_count += kbase_event_coalesce(ctx);
+ list_add_tail(&atom->dep_item[0], &ctx->event_list);
+ atomic_add(event_count, &ctx->event_count);
+ mutex_unlock(&ctx->event_mutex);
+
+ kbase_event_wakeup(ctx);
+ }
+}
+KBASE_EXPORT_TEST_API(kbase_event_post);
+
+void kbase_event_close(struct kbase_context *kctx)
+{
+ mutex_lock(&kctx->event_mutex);
+ atomic_set(&kctx->event_closed, true);
+ mutex_unlock(&kctx->event_mutex);
+ kbase_event_wakeup(kctx);
+}
+
+int kbase_event_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+ INIT_LIST_HEAD(&kctx->event_list);
+ INIT_LIST_HEAD(&kctx->event_coalesce_list);
+ mutex_init(&kctx->event_mutex);
+ atomic_set(&kctx->event_count, 0);
+ kctx->event_coalesce_count = 0;
+ atomic_set(&kctx->event_closed, false);
+ kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
+
+ if (NULL == kctx->event_workq)
+ return -EINVAL;
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_init);
+
+void kbase_event_cleanup(struct kbase_context *kctx)
+{
+ int event_count;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(kctx->event_workq);
+
+ flush_workqueue(kctx->event_workq);
+ destroy_workqueue(kctx->event_workq);
+
+ /* We use kbase_event_dequeue to remove the remaining events as that
+ * deals with all the cleanup needed for the atoms.
+ *
+ * Note: use of kctx->event_list without a lock is safe here because this
+ * must be the last thread using it (the context is being terminated and
+ * the lock is about to be destroyed).
+ */
+ event_count = kbase_event_coalesce(kctx);
+ atomic_add(event_count, &kctx->event_count);
+
+ while (!list_empty(&kctx->event_list)) {
+ struct base_jd_event_v2 event;
+
+ kbase_event_dequeue(kctx, &event);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_cleanup);
diff --git a/drivers/gpu/arm_gpu/mali_kbase_fence.c b/drivers/gpu/arm_gpu/mali_kbase_fence.c
new file mode 100644
index 000000000000..fcb373372596
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_fence.c
@@ -0,0 +1,196 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_fence_defs.h>
+#include <mali_kbase_fence.h>
+#include <mali_kbase.h>
+
+/* Spin lock protecting all Mali fences as fence->lock. */
+static DEFINE_SPINLOCK(kbase_fence_lock);
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_driver_name(struct fence *fence)
+#else
+kbase_fence_get_driver_name(struct dma_fence *fence)
+#endif
+{
+ return kbase_drv_name;
+}
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_timeline_name(struct fence *fence)
+#else
+kbase_fence_get_timeline_name(struct dma_fence *fence)
+#endif
+{
+ return kbase_timeline_name;
+}
+
+static bool
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_enable_signaling(struct fence *fence)
+#else
+kbase_fence_enable_signaling(struct dma_fence *fence)
+#endif
+{
+ return true;
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_fence_value_str(struct fence *fence, char *str, int size)
+#else
+kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+#endif
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+const struct fence_ops kbase_fence_ops = {
+ .wait = fence_default_wait,
+#else
+const struct dma_fence_ops kbase_fence_ops = {
+ .wait = dma_fence_default_wait,
+#endif
+ .get_driver_name = kbase_fence_get_driver_name,
+ .get_timeline_name = kbase_fence_get_timeline_name,
+ .enable_signaling = kbase_fence_enable_signaling,
+ .fence_value_str = kbase_fence_fence_value_str
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#else
+struct dma_fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#endif
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+
+ WARN_ON(katom->dma_fence.fence);
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence,
+ &kbase_fence_ops,
+ &kbase_fence_lock,
+ katom->dma_fence.context,
+ atomic_inc_return(&katom->dma_fence.seqno));
+
+ katom->dma_fence.fence = fence;
+
+ return fence;
+}
+
+bool
+kbase_fence_free_callbacks(struct kbase_jd_atom *katom)
+{
+ struct kbase_fence_cb *cb, *tmp;
+ bool res = false;
+
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ /* Clean up and free callbacks. */
+ list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
+ bool ret;
+
+ /* Cancel callbacks that haven't been called yet. */
+ ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb);
+ if (ret) {
+ int ret;
+
+ /* Fence had not signaled, clean up after
+ * canceling.
+ */
+ ret = atomic_dec_return(&katom->dma_fence.dep_count);
+
+ if (unlikely(ret == 0))
+ res = true;
+ }
+
+ /*
+ * Release the reference taken in
+ * kbase_fence_add_callback().
+ */
+ dma_fence_put(cb->fence);
+ list_del(&cb->node);
+ kfree(cb);
+ }
+
+ return res;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+ struct fence *fence,
+ fence_func_t callback)
+#else
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+ struct dma_fence *fence,
+ dma_fence_func_t callback)
+#endif
+{
+ int err = 0;
+ struct kbase_fence_cb *kbase_fence_cb;
+
+ if (!fence)
+ return -EINVAL;
+
+ kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
+ if (!kbase_fence_cb)
+ return -ENOMEM;
+
+ kbase_fence_cb->fence = fence;
+ kbase_fence_cb->katom = katom;
+ INIT_LIST_HEAD(&kbase_fence_cb->node);
+
+ err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb,
+ callback);
+ if (err == -ENOENT) {
+ /* Fence signaled, clear the error and return */
+ err = 0;
+ kfree(kbase_fence_cb);
+ } else if (err) {
+ kfree(kbase_fence_cb);
+ } else {
+ /*
+ * Get reference to fence that will be kept until callback gets
+ * cleaned up in kbase_fence_free_callbacks().
+ */
+ dma_fence_get(fence);
+ atomic_inc(&katom->dma_fence.dep_count);
+ /* Add callback to katom's list of callbacks */
+ list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_fence.h b/drivers/gpu/arm_gpu/mali_kbase_fence.h
new file mode 100644
index 000000000000..070dd12e1b7a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_fence.h
@@ -0,0 +1,266 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_FENCE_H_
+#define _KBASE_FENCE_H_
+
+/*
+ * mali_kbase_fence.[hc] has common fence code used by both
+ * - CONFIG_MALI_DMA_FENCE - implicit DMA fences
+ * - CONFIG_SYNC_FILE - explicit fences beginning with 4.9 kernel
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/list.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+extern const struct fence_ops kbase_fence_ops;
+#else
+extern const struct dma_fence_ops kbase_fence_ops;
+#endif
+
+/**
+ * struct kbase_fence_cb - Mali dma-fence callback data struct
+ * @fence_cb: The dma-fence callback structure registered on @fence
+ * @katom: Pointer to katom that is waiting on this callback
+ * @fence: Pointer to the fence object on which this callback is waiting
+ * @node: List head for linking this callback to the katom
+ */
+struct kbase_fence_cb {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence_cb fence_cb;
+ struct fence *fence;
+#else
+ struct dma_fence_cb fence_cb;
+ struct dma_fence *fence;
+#endif
+ struct kbase_jd_atom *katom;
+ struct list_head node;
+};
+
+/**
+ * kbase_fence_out_new() - Creates a new output fence and puts it on the atom
+ * @katom: Atom to create an output fence for
+ *
+ * Return: A new fence object on success, NULL on failure.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#else
+struct dma_fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#endif
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_fence_in_set() - Assign input fence to atom
+ * @katom: Atom to assign input fence to
+ * @fence: Input fence to assign to atom
+ *
+ * This function will take ownership of one fence reference!
+ */
+#define kbase_fence_fence_in_set(katom, fence) \
+ do { \
+ WARN_ON((katom)->dma_fence.fence_in); \
+ (katom)->dma_fence.fence_in = fence; \
+ } while (0)
+#endif
+
+/**
+ * kbase_fence_out_remove() - Removes the output fence from atom
+ * @katom: Atom to remove output fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_out_remove(struct kbase_jd_atom *katom)
+{
+ if (katom->dma_fence.fence) {
+ dma_fence_put(katom->dma_fence.fence);
+ katom->dma_fence.fence = NULL;
+ }
+}
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_remove() - Removes the input fence from atom
+ * @katom: Atom to remove input fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_in_remove(struct kbase_jd_atom *katom)
+{
+ if (katom->dma_fence.fence_in) {
+ dma_fence_put(katom->dma_fence.fence_in);
+ katom->dma_fence.fence_in = NULL;
+ }
+}
+#endif
+
+/**
+ * kbase_fence_out_is_ours() - Check if atom has a valid fence created by us
+ * @katom: Atom to check output fence for
+ *
+ * Return: true if fence exists and is valid, otherwise false
+ */
+static inline bool kbase_fence_out_is_ours(struct kbase_jd_atom *katom)
+{
+ return katom->dma_fence.fence &&
+ katom->dma_fence.fence->ops == &kbase_fence_ops;
+}
+
+/**
+ * kbase_fence_out_signal() - Signal output fence of atom
+ * @katom: Atom to signal output fence for
+ * @status: Status to signal with (0 for success, < 0 for error)
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static inline int kbase_fence_out_signal(struct kbase_jd_atom *katom,
+ int status)
+{
+ katom->dma_fence.fence->error = status;
+ return dma_fence_signal(katom->dma_fence.fence);
+}
+
+/**
+ * kbase_fence_add_callback() - Add callback on @fence to block @katom
+ * @katom: Pointer to katom that will be blocked by @fence
+ * @fence: Pointer to fence on which to set up the callback
+ * @callback: Pointer to function to be called when fence is signaled
+ *
+ * Caller needs to hold a reference to @fence when calling this function, and
+ * the caller is responsible for releasing that reference. An additional
+ * reference to @fence is taken if the callback is successfully set up, in
+ * which case @fence needs to be kept valid until the callback has been
+ * called and cleanup has been done.
+ *
+ * Return: 0 on success: fence was either already signaled, or callback was
+ * set up. Negative error code is returned on error.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+ struct fence *fence,
+ fence_func_t callback);
+#else
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+ struct dma_fence *fence,
+ dma_fence_func_t callback);
+#endif
+
+/**
+ * kbase_fence_dep_count_set() - Set dep_count value on atom to specified value
+ * @katom: Atom to set dep_count for
+ * @val: value to set dep_count to
+ *
+ * The dep_count is available to the users of this module so that they can
+ * synchronize completion of the wait with cancellation and adding of more
+ * callbacks. For instance, a user could do the following:
+ *
+ * dep_count set to 1
+ * callback #1 added, dep_count is increased to 2
+ * callback #1 happens, dep_count decremented to 1
+ * since dep_count > 0, no completion is done
+ * callback #2 is added, dep_count is increased to 2
+ * the thread that set the initial count drops it, dep_count decremented to 1
+ * callback #2 happens, dep_count decremented to 0
+ * since dep_count is now zero, completion executes
+ *
+ * The dep_count can also be used to make sure that the completion only
+ * executes once. This is typically done by setting dep_count to -1 for the
+ * thread that takes on this responsibility.
+ */
+static inline void
+kbase_fence_dep_count_set(struct kbase_jd_atom *katom, int val)
+{
+ atomic_set(&katom->dma_fence.dep_count, val);
+}
+
+/**
+ * kbase_fence_dep_count_dec_and_test() - Decrements dep_count
+ * @katom: Atom to decrement dep_count for
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: true if value was decremented to zero, otherwise false
+ */
+static inline bool
+kbase_fence_dep_count_dec_and_test(struct kbase_jd_atom *katom)
+{
+ return atomic_dec_and_test(&katom->dma_fence.dep_count);
+}
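+
+/*
+ * Illustrative sketch only (not part of the driver): a hypothetical waiter
+ * that blocks a katom on two fences using the dep_count protocol described
+ * above. my_fence_cb() and my_complete_wait() are placeholder names, not
+ * real kbase symbols, and error handling is elided.
+ *
+ *   static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+ *   {
+ *           struct kbase_fence_cb *kcb =
+ *                   container_of(cb, struct kbase_fence_cb, fence_cb);
+ *
+ *           if (kbase_fence_dep_count_dec_and_test(kcb->katom))
+ *                   my_complete_wait(kcb->katom);
+ *   }
+ *
+ *   static void my_wait_setup(struct kbase_jd_atom *katom,
+ *                             struct dma_fence *a, struct dma_fence *b)
+ *   {
+ *           kbase_fence_dep_count_set(katom, 1);
+ *
+ *           kbase_fence_add_callback(katom, a, my_fence_cb);
+ *           kbase_fence_add_callback(katom, b, my_fence_cb);
+ *
+ *           // Drop the initial count; if every callback has already
+ *           // fired, this thread performs the completion exactly once.
+ *           if (kbase_fence_dep_count_dec_and_test(katom))
+ *                   my_complete_wait(katom);
+ *   }
+ */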
+
+/**
+ * kbase_fence_dep_count_read() - Returns the current dep_count value
+ * @katom: Pointer to katom
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: The current dep_count value
+ */
+static inline int kbase_fence_dep_count_read(struct kbase_jd_atom *katom)
+{
+ return atomic_read(&katom->dma_fence.dep_count);
+}
+
+/**
+ * kbase_fence_free_callbacks() - Free dma-fence callbacks on a katom
+ * @katom: Pointer to katom
+ *
+ * This function will free all fence callbacks on the katom's list of
+ * callbacks. Callbacks that have not yet been called, because their fence
+ * hasn't yet signaled, will first be removed from the fence.
+ *
+ * Locking: jctx.lock must be held when accessing the
+ * katom->dma_fence.callbacks list.
+ *
+ * Return: true if dep_count reached 0, otherwise false.
+ */
+bool kbase_fence_free_callbacks(struct kbase_jd_atom *katom);
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_get() - Retrieve input fence for atom.
+ * @katom: Atom to get input fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no input fence for atom
+ */
+#define kbase_fence_in_get(katom) dma_fence_get((katom)->dma_fence.fence_in)
+#endif
+
+/**
+ * kbase_fence_out_get() - Retrieve output fence for atom.
+ * @katom: Atom to get output fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no output fence for atom
+ */
+#define kbase_fence_out_get(katom) dma_fence_get((katom)->dma_fence.fence)
+
+/**
+ * kbase_fence_put() - Releases a reference to a fence
+ * @fence: Fence to release reference for.
+ */
+#define kbase_fence_put(fence) dma_fence_put(fence)
+
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_fence_defs.h b/drivers/gpu/arm_gpu/mali_kbase_fence_defs.h
new file mode 100644
index 000000000000..fa2c6dfe999e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_fence_defs.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_FENCE_DEFS_H_
+#define _KBASE_FENCE_DEFS_H_
+
+/*
+ * There was a big rename in the 4.10 kernel (fence* -> dma_fence*)
+ * This file hides these compatibility issues from the rest of the driver
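+ *
+ * With these shims in place a caller can, for example, write
+ * dma_fence_get(fence) unconditionally and have it resolve to
+ * fence_get(fence) on pre-4.10 kernels.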
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+
+#include <linux/fence.h>
+
+#define dma_fence_context_alloc(a) fence_context_alloc(a)
+#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
+#define dma_fence_get(a) fence_get(a)
+#define dma_fence_put(a) fence_put(a)
+#define dma_fence_signal(a) fence_signal(a)
+#define dma_fence_is_signaled(a) fence_is_signaled(a)
+#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
+#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
+
+#else
+
+#include <linux/dma-fence.h>
+
+#endif /* < 4.10.0 */
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator.h b/drivers/gpu/arm_gpu/mali_kbase_gator.h
new file mode 100644
index 000000000000..ce65b5562a2b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* NB taken from gator */
+/*
+ * List of possible actions to be controlled by DS-5 Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping
+ * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because
+ * they are unknown inside gator.
+ */
+#ifndef _KBASE_GATOR_H_
+#define _KBASE_GATOR_H_
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+#define GATOR_JOB_SLOT_START 1
+#define GATOR_JOB_SLOT_STOP 2
+#define GATOR_JOB_SLOT_SOFT_STOPPED 3
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id);
+void kbase_trace_mali_pm_status(u32 event, u64 value);
+void kbase_trace_mali_pm_power_off(u32 event, u64 value);
+void kbase_trace_mali_pm_power_on(u32 event, u64 value);
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value);
+void kbase_trace_mali_mmu_as_in_use(int event);
+void kbase_trace_mali_mmu_as_released(int event);
+void kbase_trace_mali_total_alloc_pages_change(long long int event);
+
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+#endif /* _KBASE_GATOR_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_api.c b/drivers/gpu/arm_gpu/mali_kbase_gator_api.c
new file mode 100644
index 000000000000..860e10159fb3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_api.c
@@ -0,0 +1,334 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_gator_api.h"
+#include "mali_kbase_gator_hwcnt_names.h"
+
+#define MALI_MAX_CORES_PER_GROUP 4
+#define MALI_MAX_NUM_BLOCKS_PER_GROUP 8
+#define MALI_COUNTERS_PER_BLOCK 64
+#define MALI_BYTES_PER_COUNTER 4
+
+struct kbase_gator_hwcnt_handles {
+ struct kbase_device *kbdev;
+ struct kbase_vinstr_client *vinstr_cli;
+ void *vinstr_buffer;
+ struct work_struct dump_work;
+ int dump_complete;
+ spinlock_t dump_lock;
+};
+
+static void dump_worker(struct work_struct *work);
+
+const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
+{
+ const char * const *hardware_counters;
+ struct kbase_device *kbdev;
+ uint32_t product_id;
+ uint32_t count;
+
+ if (!total_counters)
+ return NULL;
+
+ /* Get the first device - it doesn't matter in this case */
+ kbdev = kbase_find_device(-1);
+ if (!kbdev)
+ return NULL;
+
+ product_id = kbdev->gpu_props.props.core_props.product_id;
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) {
+ case GPU_ID2_PRODUCT_TMIX:
+ hardware_counters = hardware_counters_mali_tMIx;
+ count = ARRAY_SIZE(hardware_counters_mali_tMIx);
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ hardware_counters = hardware_counters_mali_tHEx;
+ count = ARRAY_SIZE(hardware_counters_mali_tHEx);
+ break;
+ case GPU_ID2_PRODUCT_TSIX:
+ hardware_counters = hardware_counters_mali_tSIx;
+ count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+ break;
+ default:
+ hardware_counters = NULL;
+ count = 0;
+ dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+ product_id);
+ break;
+ }
+ } else {
+ switch (product_id) {
+ /* If we are using a Mali-T60x device */
+ case GPU_ID_PI_T60X:
+ hardware_counters = hardware_counters_mali_t60x;
+ count = ARRAY_SIZE(hardware_counters_mali_t60x);
+ break;
+ /* If we are using a Mali-T62x device */
+ case GPU_ID_PI_T62X:
+ hardware_counters = hardware_counters_mali_t62x;
+ count = ARRAY_SIZE(hardware_counters_mali_t62x);
+ break;
+ /* If we are using a Mali-T72x device */
+ case GPU_ID_PI_T72X:
+ hardware_counters = hardware_counters_mali_t72x;
+ count = ARRAY_SIZE(hardware_counters_mali_t72x);
+ break;
+ /* If we are using a Mali-T76x device */
+ case GPU_ID_PI_T76X:
+ hardware_counters = hardware_counters_mali_t76x;
+ count = ARRAY_SIZE(hardware_counters_mali_t76x);
+ break;
+ /* If we are using a Mali-T82x device */
+ case GPU_ID_PI_T82X:
+ hardware_counters = hardware_counters_mali_t82x;
+ count = ARRAY_SIZE(hardware_counters_mali_t82x);
+ break;
+ /* If we are using a Mali-T83x device */
+ case GPU_ID_PI_T83X:
+ hardware_counters = hardware_counters_mali_t83x;
+ count = ARRAY_SIZE(hardware_counters_mali_t83x);
+ break;
+ /* If we are using a Mali-T86x device */
+ case GPU_ID_PI_T86X:
+ hardware_counters = hardware_counters_mali_t86x;
+ count = ARRAY_SIZE(hardware_counters_mali_t86x);
+ break;
+ /* If we are using a Mali-T88x device */
+ case GPU_ID_PI_TFRX:
+ hardware_counters = hardware_counters_mali_t88x;
+ count = ARRAY_SIZE(hardware_counters_mali_t88x);
+ break;
+ default:
+ hardware_counters = NULL;
+ count = 0;
+ dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+ product_id);
+ break;
+ }
+ }
+
+ /* Release the kbdev reference. */
+ kbase_release_device(kbdev);
+
+ *total_counters = count;
+
+ /* If we return a string array take a reference on the module (or fail). */
+ if (hardware_counters && !try_module_get(THIS_MODULE))
+ return NULL;
+
+ return hardware_counters;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names);
+
+void kbase_gator_hwcnt_term_names(void)
+{
+ /* Release the module reference. */
+ module_put(THIS_MODULE);
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names);
+
+struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
+{
+ struct kbase_gator_hwcnt_handles *hand;
+ struct kbase_uk_hwcnt_reader_setup setup;
+ uint32_t dump_size = 0, i = 0;
+
+ if (!in_out_info)
+ return NULL;
+
+ hand = kzalloc(sizeof(*hand), GFP_KERNEL);
+ if (!hand)
+ return NULL;
+
+ INIT_WORK(&hand->dump_work, dump_worker);
+ spin_lock_init(&hand->dump_lock);
+
+ /* Get the first device */
+ hand->kbdev = kbase_find_device(-1);
+ if (!hand->kbdev)
+ goto free_hand;
+
+ dump_size = kbase_vinstr_dump_size(hand->kbdev);
+ hand->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL);
+ if (!hand->vinstr_buffer)
+ goto release_device;
+ in_out_info->kernel_dump_buffer = hand->vinstr_buffer;
+
+ in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
+ in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
+ in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;
+
+ /* If we are using a v4 device (Mali-T6xx or Mali-T72x) */
+ if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) {
+ uint32_t cg, j;
+ uint64_t core_mask;
+
+ /* There are 8 hardware counters blocks per core group */
+ in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) *
+ MALI_MAX_NUM_BLOCKS_PER_GROUP *
+ in_out_info->nr_core_groups, GFP_KERNEL);
+
+ if (!in_out_info->hwc_layout)
+ goto free_vinstr_buffer;
+
+ dump_size = in_out_info->nr_core_groups *
+ MALI_MAX_NUM_BLOCKS_PER_GROUP *
+ MALI_COUNTERS_PER_BLOCK *
+ MALI_BYTES_PER_COUNTER;
+
+ for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
+ core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;
+
+ for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
+ if (core_mask & (1u << j))
+ in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ }
+
+ in_out_info->hwc_layout[i++] = TILER_BLOCK;
+ in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+
+ if (0 == cg)
+ in_out_info->hwc_layout[i++] = JM_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ }
+ /* If we are using any other device */
+ } else {
+ uint32_t nr_l2, nr_sc_bits, j;
+ uint64_t core_mask;
+
+ nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;
+
+ core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+ nr_sc_bits = fls64(core_mask);
+
+ /* The job manager and tiler sets of counters
+ * are always present */
+ in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL);
+
+ if (!in_out_info->hwc_layout)
+ goto free_vinstr_buffer;
+
+ dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;
+
+ in_out_info->hwc_layout[i++] = JM_BLOCK;
+ in_out_info->hwc_layout[i++] = TILER_BLOCK;
+
+ for (j = 0; j < nr_l2; j++)
+ in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+ while (core_mask != 0ull) {
+ if ((core_mask & 1ull) != 0ull)
+ in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ core_mask >>= 1;
+ }
+ }
+
+ in_out_info->nr_hwc_blocks = i;
+ in_out_info->size = dump_size;
+
+ setup.jm_bm = in_out_info->bitmask[0];
+ setup.tiler_bm = in_out_info->bitmask[1];
+ setup.shader_bm = in_out_info->bitmask[2];
+ setup.mmu_l2_bm = in_out_info->bitmask[3];
+ hand->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(hand->kbdev->vinstr_ctx,
+ &setup, hand->vinstr_buffer);
+ if (!hand->vinstr_cli) {
+ dev_err(hand->kbdev->dev, "Failed to register gator with vinstr core");
+ goto free_layout;
+ }
+
+ return hand;
+
+free_layout:
+ kfree(in_out_info->hwc_layout);
+
+free_vinstr_buffer:
+ kfree(hand->vinstr_buffer);
+
+release_device:
+ kbase_release_device(hand->kbdev);
+
+free_hand:
+ kfree(hand);
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init);
+
+void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+ if (in_out_info)
+ kfree(in_out_info->hwc_layout);
+
+ if (opaque_handles) {
+ cancel_work_sync(&opaque_handles->dump_work);
+ kbase_vinstr_detach_client(opaque_handles->vinstr_cli);
+ kfree(opaque_handles->vinstr_buffer);
+ kbase_release_device(opaque_handles->kbdev);
+ kfree(opaque_handles);
+ }
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term);
+
+static void dump_worker(struct work_struct *work)
+{
+ struct kbase_gator_hwcnt_handles *hand;
+
+ hand = container_of(work, struct kbase_gator_hwcnt_handles, dump_work);
+ if (!kbase_vinstr_hwc_dump(hand->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL)) {
+ spin_lock_bh(&hand->dump_lock);
+ hand->dump_complete = 1;
+ spin_unlock_bh(&hand->dump_lock);
+ } else {
+ schedule_work(&hand->dump_work);
+ }
+}
+
+uint32_t kbase_gator_instr_hwcnt_dump_complete(
+ struct kbase_gator_hwcnt_handles *opaque_handles,
+ uint32_t * const success)
+{
+
+ if (opaque_handles && success) {
+ *success = opaque_handles->dump_complete;
+ opaque_handles->dump_complete = 0;
+ return *success;
+ }
+ return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete);
+
+uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+ if (opaque_handles)
+ schedule_work(&opaque_handles->dump_work);
+ return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq);
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_api.h b/drivers/gpu/arm_gpu/mali_kbase_gator_api.h
new file mode 100644
index 000000000000..ef9ac0f7b633
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_api.h
@@ -0,0 +1,219 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to fetch hardware counters.
+ */
+
+/* This define is used by the gator kernel module compile to select which DDK
+ * API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ * Version 1 API: DDK versions r1px, r2px
+ * Version 2 API: DDK versions r3px, r4px
+ * Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_hwcnt_dump_irq() to queue a non-blocking request for a
+ * counter dump. If this returns a non-zero value the request has been queued,
+ * otherwise the driver has been unable to do so (typically because of another
+ * user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_hwcnt_dump_complete() to test whether the previously
+ * requested dump has been successful. If this returns non-zero the counter
+ * dump has completed, but the value of *success must also be tested as the
+ * dump may not have been successful. If it returns zero the counter dump was
+ * abandoned because the device was busy (typically because another user of
+ * the instrumentation exists concurrently).
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ * kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ * In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ * hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ * hwcnt_name # pointer to name list
+ *
+ * u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ * # Iterate over each 64-counter block in this GPU configuration
+ * for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ * hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ * # Skip reserved type blocks - they contain no counters at all
+ * if( type == RESERVED_BLOCK ) {
+ * continue;
+ * }
+ *
+ * size_t name_offset = type * 64;
+ * size_t data_offset = i * 64;
+ *
+ * # Iterate over the names of the counters in this block type
+ * for( j = 0; j < 64; j++) {
+ * const char * name = hwcnt_name[name_offset+j];
+ *
+ * # Skip empty name strings - there is no counter here
+ * if( name[0] == '\0' ) {
+ * continue;
+ * }
+ *
+ * u32 data = hwcnt_data[data_offset+j];
+ *
+ * printk( "COUNTER: %s DATA: %u\n", name, data );
+ * }
+ * }
+ *
+ *
+ * Note that in most implementations you typically want to either SUM or
+ * AVERAGE multiple instances of the same counter if, for example, you have
+ * multiple shader cores or multiple L2 caches. The most sensible view for
+ * analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ * counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling
+ * kbase_gator_hwcnt_term_names(). This function must only be called if
+ * kbase_gator_hwcnt_init_names() returned a non-NULL value.
+ **/
+
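+/*
+ * Illustrative sketch of steps 3-8 above (not part of this API; error
+ * handling and counter decoding are elided, and my_process_buffer() is a
+ * placeholder for the consumer's own processing code):
+ *
+ *   struct kbase_gator_hwcnt_info info = {
+ *           .bitmask = { 0xffff, 0xffff, 0xffff, 0xffff }  // enable all
+ *   };
+ *   struct kbase_gator_hwcnt_handles *hand;
+ *   uint32_t success;
+ *
+ *   hand = kbase_gator_hwcnt_init(&info);
+ *   if (hand) {
+ *           kbase_gator_instr_hwcnt_dump_irq(hand);
+ *           while (!kbase_gator_instr_hwcnt_dump_complete(hand, &success))
+ *                   ;  // poll (or sleep) until the dump resolves
+ *           if (success)
+ *                   my_process_buffer(info.kernel_dump_buffer, info.size);
+ *           kbase_gator_hwcnt_term(&info, hand);
+ *   }
+ */
+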
+#define MALI_DDK_GATOR_API_VERSION 3
+
+enum hwc_type {
+ JM_BLOCK = 0,
+ TILER_BLOCK,
+ SHADER_BLOCK,
+ MMU_L2_BLOCK,
+ RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+ /* Passed from Gator to kbase */
+
+ /* the bitmask of enabled hardware counters for each counter block */
+ uint16_t bitmask[4];
+
+ /* Passed from kbase to Gator */
+
+ /* ptr to counter dump memory */
+ void *kernel_dump_buffer;
+
+ /* size of counter dump memory */
+ uint32_t size;
+
+ /* the ID of the Mali device */
+ uint32_t gpu_id;
+
+ /* the number of shader cores in the GPU */
+ uint32_t nr_cores;
+
+ /* the number of core groups */
+ uint32_t nr_core_groups;
+
+ /* the memory layout of the performance counters */
+ enum hwc_type *hwc_layout;
+
+ /* the total number of hardware counter blocks */
+ uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ * specific information that will be returned to Gator. On entry Gator must have populated the
+ * 'bitmask' field with the counters it wishes to enable for each class of counter block.
+ * Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ * enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ * the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ * kbase_gator_hwcnt_init_names() for the index values of each counter for the current GPU.
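+ *                    For example, a bitmask value of 0x0003 for a block class
+ *                    enables the first eight counters of that block (bit 0
+ *                    enables counters 0-3, bit 1 enables counters 4-7).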
+ *
+ * @return Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the
+ * Mali specific information that will be returned to Gator.
+ * @param opaque_handles A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ * @param[out] success Non-zero on success, zero on failure.
+ *
+ * @return Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ * completed dump may not have dumped successfully, so the caller must test for both
+ * a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ *
+ * @return Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_counters The total number of counter short names in the Mali device's list.
+ *
+ * @return Pointer to an array of strings of length *total_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names.h
new file mode 100644
index 000000000000..cad19b66200d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names.h
@@ -0,0 +1,2170 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_H_
+#define _KBASE_GATOR_HWCNT_NAMES_H_
+
+/*
+ * "Short names" for hardware counters used by Streamline. Counters names are
+ * stored in accordance with their memory layout in the binary counter block
+ * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block
+ * of 64 counters, and each GPU implements the same set of "masters" although
+ * the counters each master exposes within its block of 64 may vary.
+ *
+ * Counters which are an empty string are simply "holes" in the counter memory
+ * where no counter exists.
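+ *
+ * For example, the name of counter c in a block of type t is found at index
+ * (t * 64) + c; counter 6 of the Job Manager block (type 0) in the Mali-T60x
+ * table below is hardware_counters_mali_t60x[6], i.e. "T60x_GPU_ACTIVE".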
+ */
+
+static const char * const hardware_counters_mali_t60x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T60x_MESSAGES_SENT",
+ "T60x_MESSAGES_RECEIVED",
+ "T60x_GPU_ACTIVE",
+ "T60x_IRQ_ACTIVE",
+ "T60x_JS0_JOBS",
+ "T60x_JS0_TASKS",
+ "T60x_JS0_ACTIVE",
+ "",
+ "T60x_JS0_WAIT_READ",
+ "T60x_JS0_WAIT_ISSUE",
+ "T60x_JS0_WAIT_DEPEND",
+ "T60x_JS0_WAIT_FINISH",
+ "T60x_JS1_JOBS",
+ "T60x_JS1_TASKS",
+ "T60x_JS1_ACTIVE",
+ "",
+ "T60x_JS1_WAIT_READ",
+ "T60x_JS1_WAIT_ISSUE",
+ "T60x_JS1_WAIT_DEPEND",
+ "T60x_JS1_WAIT_FINISH",
+ "T60x_JS2_JOBS",
+ "T60x_JS2_TASKS",
+ "T60x_JS2_ACTIVE",
+ "",
+ "T60x_JS2_WAIT_READ",
+ "T60x_JS2_WAIT_ISSUE",
+ "T60x_JS2_WAIT_DEPEND",
+ "T60x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T60x_TI_JOBS_PROCESSED",
+ "T60x_TI_TRIANGLES",
+ "T60x_TI_QUADS",
+ "T60x_TI_POLYGONS",
+ "T60x_TI_POINTS",
+ "T60x_TI_LINES",
+ "T60x_TI_VCACHE_HIT",
+ "T60x_TI_VCACHE_MISS",
+ "T60x_TI_FRONT_FACING",
+ "T60x_TI_BACK_FACING",
+ "T60x_TI_PRIM_VISIBLE",
+ "T60x_TI_PRIM_CULLED",
+ "T60x_TI_PRIM_CLIPPED",
+ "T60x_TI_LEVEL0",
+ "T60x_TI_LEVEL1",
+ "T60x_TI_LEVEL2",
+ "T60x_TI_LEVEL3",
+ "T60x_TI_LEVEL4",
+ "T60x_TI_LEVEL5",
+ "T60x_TI_LEVEL6",
+ "T60x_TI_LEVEL7",
+ "T60x_TI_COMMAND_1",
+ "T60x_TI_COMMAND_2",
+ "T60x_TI_COMMAND_3",
+ "T60x_TI_COMMAND_4",
+ "T60x_TI_COMMAND_4_7",
+ "T60x_TI_COMMAND_8_15",
+ "T60x_TI_COMMAND_16_63",
+ "T60x_TI_COMMAND_64",
+ "T60x_TI_COMPRESS_IN",
+ "T60x_TI_COMPRESS_OUT",
+ "T60x_TI_COMPRESS_FLUSH",
+ "T60x_TI_TIMESTAMPS",
+ "T60x_TI_PCACHE_HIT",
+ "T60x_TI_PCACHE_MISS",
+ "T60x_TI_PCACHE_LINE",
+ "T60x_TI_PCACHE_STALL",
+ "T60x_TI_WRBUF_HIT",
+ "T60x_TI_WRBUF_MISS",
+ "T60x_TI_WRBUF_LINE",
+ "T60x_TI_WRBUF_PARTIAL",
+ "T60x_TI_WRBUF_STALL",
+ "T60x_TI_ACTIVE",
+ "T60x_TI_LOADING_DESC",
+ "T60x_TI_INDEX_WAIT",
+ "T60x_TI_INDEX_RANGE_WAIT",
+ "T60x_TI_VERTEX_WAIT",
+ "T60x_TI_PCACHE_WAIT",
+ "T60x_TI_WRBUF_WAIT",
+ "T60x_TI_BUS_READ",
+ "T60x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_TI_UTLB_STALL",
+ "T60x_TI_UTLB_REPLAY_MISS",
+ "T60x_TI_UTLB_REPLAY_FULL",
+ "T60x_TI_UTLB_NEW_MISS",
+ "T60x_TI_UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T60x_FRAG_ACTIVE",
+ "T60x_FRAG_PRIMITIVES",
+ "T60x_FRAG_PRIMITIVES_DROPPED",
+ "T60x_FRAG_CYCLES_DESC",
+ "T60x_FRAG_CYCLES_PLR",
+ "T60x_FRAG_CYCLES_VERT",
+ "T60x_FRAG_CYCLES_TRISETUP",
+ "T60x_FRAG_CYCLES_RAST",
+ "T60x_FRAG_THREADS",
+ "T60x_FRAG_DUMMY_THREADS",
+ "T60x_FRAG_QUADS_RAST",
+ "T60x_FRAG_QUADS_EZS_TEST",
+ "T60x_FRAG_QUADS_EZS_KILLED",
+ "T60x_FRAG_THREADS_LZS_TEST",
+ "T60x_FRAG_THREADS_LZS_KILLED",
+ "T60x_FRAG_CYCLES_NO_TILE",
+ "T60x_FRAG_NUM_TILES",
+ "T60x_FRAG_TRANS_ELIM",
+ "T60x_COMPUTE_ACTIVE",
+ "T60x_COMPUTE_TASKS",
+ "T60x_COMPUTE_THREADS",
+ "T60x_COMPUTE_CYCLES_DESC",
+ "T60x_TRIPIPE_ACTIVE",
+ "T60x_ARITH_WORDS",
+ "T60x_ARITH_CYCLES_REG",
+ "T60x_ARITH_CYCLES_L0",
+ "T60x_ARITH_FRAG_DEPEND",
+ "T60x_LS_WORDS",
+ "T60x_LS_ISSUES",
+ "T60x_LS_RESTARTS",
+ "T60x_LS_REISSUES_MISS",
+ "T60x_LS_REISSUES_VD",
+ "T60x_LS_REISSUE_ATTRIB_MISS",
+ "T60x_LS_NO_WB",
+ "T60x_TEX_WORDS",
+ "T60x_TEX_BUBBLES",
+ "T60x_TEX_WORDS_L0",
+ "T60x_TEX_WORDS_DESC",
+ "T60x_TEX_ISSUES",
+ "T60x_TEX_RECIRC_FMISS",
+ "T60x_TEX_RECIRC_DESC",
+ "T60x_TEX_RECIRC_MULTI",
+ "T60x_TEX_RECIRC_PMISS",
+ "T60x_TEX_RECIRC_CONF",
+ "T60x_LSC_READ_HITS",
+ "T60x_LSC_READ_MISSES",
+ "T60x_LSC_WRITE_HITS",
+ "T60x_LSC_WRITE_MISSES",
+ "T60x_LSC_ATOMIC_HITS",
+ "T60x_LSC_ATOMIC_MISSES",
+ "T60x_LSC_LINE_FETCHES",
+ "T60x_LSC_DIRTY_LINE",
+ "T60x_LSC_SNOOPS",
+ "T60x_AXI_TLB_STALL",
+ "T60x_AXI_TLB_MISS",
+ "T60x_AXI_TLB_TRANSACTION",
+ "T60x_LS_TLB_MISS",
+ "T60x_LS_TLB_HIT",
+ "T60x_AXI_BEATS_READ",
+ "T60x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T60x_MMU_HIT",
+ "T60x_MMU_NEW_MISS",
+ "T60x_MMU_REPLAY_FULL",
+ "T60x_MMU_REPLAY_MISS",
+ "T60x_MMU_TABLE_WALK",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_UTLB_HIT",
+ "T60x_UTLB_NEW_MISS",
+ "T60x_UTLB_REPLAY_FULL",
+ "T60x_UTLB_REPLAY_MISS",
+ "T60x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_L2_EXT_WRITE_BEATS",
+ "T60x_L2_EXT_READ_BEATS",
+ "T60x_L2_ANY_LOOKUP",
+ "T60x_L2_READ_LOOKUP",
+ "T60x_L2_SREAD_LOOKUP",
+ "T60x_L2_READ_REPLAY",
+ "T60x_L2_READ_SNOOP",
+ "T60x_L2_READ_HIT",
+ "T60x_L2_CLEAN_MISS",
+ "T60x_L2_WRITE_LOOKUP",
+ "T60x_L2_SWRITE_LOOKUP",
+ "T60x_L2_WRITE_REPLAY",
+ "T60x_L2_WRITE_SNOOP",
+ "T60x_L2_WRITE_HIT",
+ "T60x_L2_EXT_READ_FULL",
+ "T60x_L2_EXT_READ_HALF",
+ "T60x_L2_EXT_WRITE_FULL",
+ "T60x_L2_EXT_WRITE_HALF",
+ "T60x_L2_EXT_READ",
+ "T60x_L2_EXT_READ_LINE",
+ "T60x_L2_EXT_WRITE",
+ "T60x_L2_EXT_WRITE_LINE",
+ "T60x_L2_EXT_WRITE_SMALL",
+ "T60x_L2_EXT_BARRIER",
+ "T60x_L2_EXT_AR_STALL",
+ "T60x_L2_EXT_R_BUF_FULL",
+ "T60x_L2_EXT_RD_BUF_FULL",
+ "T60x_L2_EXT_R_RAW",
+ "T60x_L2_EXT_W_STALL",
+ "T60x_L2_EXT_W_BUF_FULL",
+ "T60x_L2_EXT_R_W_HAZARD",
+ "T60x_L2_TAG_HAZARD",
+ "T60x_L2_SNOOP_FULL",
+ "T60x_L2_REPLAY_FULL"
+};
+static const char * const hardware_counters_mali_t62x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T62x_MESSAGES_SENT",
+ "T62x_MESSAGES_RECEIVED",
+ "T62x_GPU_ACTIVE",
+ "T62x_IRQ_ACTIVE",
+ "T62x_JS0_JOBS",
+ "T62x_JS0_TASKS",
+ "T62x_JS0_ACTIVE",
+ "",
+ "T62x_JS0_WAIT_READ",
+ "T62x_JS0_WAIT_ISSUE",
+ "T62x_JS0_WAIT_DEPEND",
+ "T62x_JS0_WAIT_FINISH",
+ "T62x_JS1_JOBS",
+ "T62x_JS1_TASKS",
+ "T62x_JS1_ACTIVE",
+ "",
+ "T62x_JS1_WAIT_READ",
+ "T62x_JS1_WAIT_ISSUE",
+ "T62x_JS1_WAIT_DEPEND",
+ "T62x_JS1_WAIT_FINISH",
+ "T62x_JS2_JOBS",
+ "T62x_JS2_TASKS",
+ "T62x_JS2_ACTIVE",
+ "",
+ "T62x_JS2_WAIT_READ",
+ "T62x_JS2_WAIT_ISSUE",
+ "T62x_JS2_WAIT_DEPEND",
+ "T62x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T62x_TI_JOBS_PROCESSED",
+ "T62x_TI_TRIANGLES",
+ "T62x_TI_QUADS",
+ "T62x_TI_POLYGONS",
+ "T62x_TI_POINTS",
+ "T62x_TI_LINES",
+ "T62x_TI_VCACHE_HIT",
+ "T62x_TI_VCACHE_MISS",
+ "T62x_TI_FRONT_FACING",
+ "T62x_TI_BACK_FACING",
+ "T62x_TI_PRIM_VISIBLE",
+ "T62x_TI_PRIM_CULLED",
+ "T62x_TI_PRIM_CLIPPED",
+ "T62x_TI_LEVEL0",
+ "T62x_TI_LEVEL1",
+ "T62x_TI_LEVEL2",
+ "T62x_TI_LEVEL3",
+ "T62x_TI_LEVEL4",
+ "T62x_TI_LEVEL5",
+ "T62x_TI_LEVEL6",
+ "T62x_TI_LEVEL7",
+ "T62x_TI_COMMAND_1",
+ "T62x_TI_COMMAND_2",
+ "T62x_TI_COMMAND_3",
+ "T62x_TI_COMMAND_4",
+ "T62x_TI_COMMAND_5_7",
+ "T62x_TI_COMMAND_8_15",
+ "T62x_TI_COMMAND_16_63",
+ "T62x_TI_COMMAND_64",
+ "T62x_TI_COMPRESS_IN",
+ "T62x_TI_COMPRESS_OUT",
+ "T62x_TI_COMPRESS_FLUSH",
+ "T62x_TI_TIMESTAMPS",
+ "T62x_TI_PCACHE_HIT",
+ "T62x_TI_PCACHE_MISS",
+ "T62x_TI_PCACHE_LINE",
+ "T62x_TI_PCACHE_STALL",
+ "T62x_TI_WRBUF_HIT",
+ "T62x_TI_WRBUF_MISS",
+ "T62x_TI_WRBUF_LINE",
+ "T62x_TI_WRBUF_PARTIAL",
+ "T62x_TI_WRBUF_STALL",
+ "T62x_TI_ACTIVE",
+ "T62x_TI_LOADING_DESC",
+ "T62x_TI_INDEX_WAIT",
+ "T62x_TI_INDEX_RANGE_WAIT",
+ "T62x_TI_VERTEX_WAIT",
+ "T62x_TI_PCACHE_WAIT",
+ "T62x_TI_WRBUF_WAIT",
+ "T62x_TI_BUS_READ",
+ "T62x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_TI_UTLB_STALL",
+ "T62x_TI_UTLB_REPLAY_MISS",
+ "T62x_TI_UTLB_REPLAY_FULL",
+ "T62x_TI_UTLB_NEW_MISS",
+ "T62x_TI_UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "T62x_SHADER_CORE_ACTIVE",
+ "T62x_FRAG_ACTIVE",
+ "T62x_FRAG_PRIMITIVES",
+ "T62x_FRAG_PRIMITIVES_DROPPED",
+ "T62x_FRAG_CYCLES_DESC",
+ "T62x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T62x_FRAG_CYCLES_VERT",
+ "T62x_FRAG_CYCLES_TRISETUP",
+ "T62x_FRAG_CYCLES_EZS_ACTIVE",
+ "T62x_FRAG_THREADS",
+ "T62x_FRAG_DUMMY_THREADS",
+ "T62x_FRAG_QUADS_RAST",
+ "T62x_FRAG_QUADS_EZS_TEST",
+ "T62x_FRAG_QUADS_EZS_KILLED",
+ "T62x_FRAG_THREADS_LZS_TEST",
+ "T62x_FRAG_THREADS_LZS_KILLED",
+ "T62x_FRAG_CYCLES_NO_TILE",
+ "T62x_FRAG_NUM_TILES",
+ "T62x_FRAG_TRANS_ELIM",
+ "T62x_COMPUTE_ACTIVE",
+ "T62x_COMPUTE_TASKS",
+ "T62x_COMPUTE_THREADS",
+ "T62x_COMPUTE_CYCLES_DESC",
+ "T62x_TRIPIPE_ACTIVE",
+ "T62x_ARITH_WORDS",
+ "T62x_ARITH_CYCLES_REG",
+ "T62x_ARITH_CYCLES_L0",
+ "T62x_ARITH_FRAG_DEPEND",
+ "T62x_LS_WORDS",
+ "T62x_LS_ISSUES",
+ "T62x_LS_RESTARTS",
+ "T62x_LS_REISSUES_MISS",
+ "T62x_LS_REISSUES_VD",
+ "T62x_LS_REISSUE_ATTRIB_MISS",
+ "T62x_LS_NO_WB",
+ "T62x_TEX_WORDS",
+ "T62x_TEX_BUBBLES",
+ "T62x_TEX_WORDS_L0",
+ "T62x_TEX_WORDS_DESC",
+ "T62x_TEX_ISSUES",
+ "T62x_TEX_RECIRC_FMISS",
+ "T62x_TEX_RECIRC_DESC",
+ "T62x_TEX_RECIRC_MULTI",
+ "T62x_TEX_RECIRC_PMISS",
+ "T62x_TEX_RECIRC_CONF",
+ "T62x_LSC_READ_HITS",
+ "T62x_LSC_READ_MISSES",
+ "T62x_LSC_WRITE_HITS",
+ "T62x_LSC_WRITE_MISSES",
+ "T62x_LSC_ATOMIC_HITS",
+ "T62x_LSC_ATOMIC_MISSES",
+ "T62x_LSC_LINE_FETCHES",
+ "T62x_LSC_DIRTY_LINE",
+ "T62x_LSC_SNOOPS",
+ "T62x_AXI_TLB_STALL",
+ "T62x_AXI_TLB_MISS",
+ "T62x_AXI_TLB_TRANSACTION",
+ "T62x_LS_TLB_MISS",
+ "T62x_LS_TLB_HIT",
+ "T62x_AXI_BEATS_READ",
+ "T62x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T62x_MMU_HIT",
+ "T62x_MMU_NEW_MISS",
+ "T62x_MMU_REPLAY_FULL",
+ "T62x_MMU_REPLAY_MISS",
+ "T62x_MMU_TABLE_WALK",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_UTLB_HIT",
+ "T62x_UTLB_NEW_MISS",
+ "T62x_UTLB_REPLAY_FULL",
+ "T62x_UTLB_REPLAY_MISS",
+ "T62x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_L2_EXT_WRITE_BEATS",
+ "T62x_L2_EXT_READ_BEATS",
+ "T62x_L2_ANY_LOOKUP",
+ "T62x_L2_READ_LOOKUP",
+ "T62x_L2_SREAD_LOOKUP",
+ "T62x_L2_READ_REPLAY",
+ "T62x_L2_READ_SNOOP",
+ "T62x_L2_READ_HIT",
+ "T62x_L2_CLEAN_MISS",
+ "T62x_L2_WRITE_LOOKUP",
+ "T62x_L2_SWRITE_LOOKUP",
+ "T62x_L2_WRITE_REPLAY",
+ "T62x_L2_WRITE_SNOOP",
+ "T62x_L2_WRITE_HIT",
+ "T62x_L2_EXT_READ_FULL",
+ "T62x_L2_EXT_READ_HALF",
+ "T62x_L2_EXT_WRITE_FULL",
+ "T62x_L2_EXT_WRITE_HALF",
+ "T62x_L2_EXT_READ",
+ "T62x_L2_EXT_READ_LINE",
+ "T62x_L2_EXT_WRITE",
+ "T62x_L2_EXT_WRITE_LINE",
+ "T62x_L2_EXT_WRITE_SMALL",
+ "T62x_L2_EXT_BARRIER",
+ "T62x_L2_EXT_AR_STALL",
+ "T62x_L2_EXT_R_BUF_FULL",
+ "T62x_L2_EXT_RD_BUF_FULL",
+ "T62x_L2_EXT_R_RAW",
+ "T62x_L2_EXT_W_STALL",
+ "T62x_L2_EXT_W_BUF_FULL",
+ "T62x_L2_EXT_R_W_HAZARD",
+ "T62x_L2_TAG_HAZARD",
+ "T62x_L2_SNOOP_FULL",
+ "T62x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t72x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T72x_GPU_ACTIVE",
+ "T72x_IRQ_ACTIVE",
+ "T72x_JS0_JOBS",
+ "T72x_JS0_TASKS",
+ "T72x_JS0_ACTIVE",
+ "T72x_JS1_JOBS",
+ "T72x_JS1_TASKS",
+ "T72x_JS1_ACTIVE",
+ "T72x_JS2_JOBS",
+ "T72x_JS2_TASKS",
+ "T72x_JS2_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T72x_TI_JOBS_PROCESSED",
+ "T72x_TI_TRIANGLES",
+ "T72x_TI_QUADS",
+ "T72x_TI_POLYGONS",
+ "T72x_TI_POINTS",
+ "T72x_TI_LINES",
+ "T72x_TI_FRONT_FACING",
+ "T72x_TI_BACK_FACING",
+ "T72x_TI_PRIM_VISIBLE",
+ "T72x_TI_PRIM_CULLED",
+ "T72x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T72x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T72x_FRAG_ACTIVE",
+ "T72x_FRAG_PRIMITIVES",
+ "T72x_FRAG_PRIMITIVES_DROPPED",
+ "T72x_FRAG_THREADS",
+ "T72x_FRAG_DUMMY_THREADS",
+ "T72x_FRAG_QUADS_RAST",
+ "T72x_FRAG_QUADS_EZS_TEST",
+ "T72x_FRAG_QUADS_EZS_KILLED",
+ "T72x_FRAG_THREADS_LZS_TEST",
+ "T72x_FRAG_THREADS_LZS_KILLED",
+ "T72x_FRAG_CYCLES_NO_TILE",
+ "T72x_FRAG_NUM_TILES",
+ "T72x_FRAG_TRANS_ELIM",
+ "T72x_COMPUTE_ACTIVE",
+ "T72x_COMPUTE_TASKS",
+ "T72x_COMPUTE_THREADS",
+ "T72x_TRIPIPE_ACTIVE",
+ "T72x_ARITH_WORDS",
+ "T72x_ARITH_CYCLES_REG",
+ "T72x_LS_WORDS",
+ "T72x_LS_ISSUES",
+ "T72x_LS_RESTARTS",
+ "T72x_LS_REISSUES_MISS",
+ "T72x_TEX_WORDS",
+ "T72x_TEX_BUBBLES",
+ "T72x_TEX_ISSUES",
+ "T72x_LSC_READ_HITS",
+ "T72x_LSC_READ_MISSES",
+ "T72x_LSC_WRITE_HITS",
+ "T72x_LSC_WRITE_MISSES",
+ "T72x_LSC_ATOMIC_HITS",
+ "T72x_LSC_ATOMIC_MISSES",
+ "T72x_LSC_LINE_FETCHES",
+ "T72x_LSC_DIRTY_LINE",
+ "T72x_LSC_SNOOPS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T72x_L2_EXT_WRITE_BEAT",
+ "T72x_L2_EXT_READ_BEAT",
+ "T72x_L2_READ_SNOOP",
+ "T72x_L2_READ_HIT",
+ "T72x_L2_WRITE_SNOOP",
+ "T72x_L2_WRITE_HIT",
+ "T72x_L2_EXT_WRITE_SMALL",
+ "T72x_L2_EXT_BARRIER",
+ "T72x_L2_EXT_AR_STALL",
+ "T72x_L2_EXT_W_STALL",
+ "T72x_L2_SNOOP_FULL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+};
+
+static const char * const hardware_counters_mali_t76x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T76x_MESSAGES_SENT",
+ "T76x_MESSAGES_RECEIVED",
+ "T76x_GPU_ACTIVE",
+ "T76x_IRQ_ACTIVE",
+ "T76x_JS0_JOBS",
+ "T76x_JS0_TASKS",
+ "T76x_JS0_ACTIVE",
+ "",
+ "T76x_JS0_WAIT_READ",
+ "T76x_JS0_WAIT_ISSUE",
+ "T76x_JS0_WAIT_DEPEND",
+ "T76x_JS0_WAIT_FINISH",
+ "T76x_JS1_JOBS",
+ "T76x_JS1_TASKS",
+ "T76x_JS1_ACTIVE",
+ "",
+ "T76x_JS1_WAIT_READ",
+ "T76x_JS1_WAIT_ISSUE",
+ "T76x_JS1_WAIT_DEPEND",
+ "T76x_JS1_WAIT_FINISH",
+ "T76x_JS2_JOBS",
+ "T76x_JS2_TASKS",
+ "T76x_JS2_ACTIVE",
+ "",
+ "T76x_JS2_WAIT_READ",
+ "T76x_JS2_WAIT_ISSUE",
+ "T76x_JS2_WAIT_DEPEND",
+ "T76x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T76x_TI_JOBS_PROCESSED",
+ "T76x_TI_TRIANGLES",
+ "T76x_TI_QUADS",
+ "T76x_TI_POLYGONS",
+ "T76x_TI_POINTS",
+ "T76x_TI_LINES",
+ "T76x_TI_VCACHE_HIT",
+ "T76x_TI_VCACHE_MISS",
+ "T76x_TI_FRONT_FACING",
+ "T76x_TI_BACK_FACING",
+ "T76x_TI_PRIM_VISIBLE",
+ "T76x_TI_PRIM_CULLED",
+ "T76x_TI_PRIM_CLIPPED",
+ "T76x_TI_LEVEL0",
+ "T76x_TI_LEVEL1",
+ "T76x_TI_LEVEL2",
+ "T76x_TI_LEVEL3",
+ "T76x_TI_LEVEL4",
+ "T76x_TI_LEVEL5",
+ "T76x_TI_LEVEL6",
+ "T76x_TI_LEVEL7",
+ "T76x_TI_COMMAND_1",
+ "T76x_TI_COMMAND_2",
+ "T76x_TI_COMMAND_3",
+ "T76x_TI_COMMAND_4",
+ "T76x_TI_COMMAND_5_7",
+ "T76x_TI_COMMAND_8_15",
+ "T76x_TI_COMMAND_16_63",
+ "T76x_TI_COMMAND_64",
+ "T76x_TI_COMPRESS_IN",
+ "T76x_TI_COMPRESS_OUT",
+ "T76x_TI_COMPRESS_FLUSH",
+ "T76x_TI_TIMESTAMPS",
+ "T76x_TI_PCACHE_HIT",
+ "T76x_TI_PCACHE_MISS",
+ "T76x_TI_PCACHE_LINE",
+ "T76x_TI_PCACHE_STALL",
+ "T76x_TI_WRBUF_HIT",
+ "T76x_TI_WRBUF_MISS",
+ "T76x_TI_WRBUF_LINE",
+ "T76x_TI_WRBUF_PARTIAL",
+ "T76x_TI_WRBUF_STALL",
+ "T76x_TI_ACTIVE",
+ "T76x_TI_LOADING_DESC",
+ "T76x_TI_INDEX_WAIT",
+ "T76x_TI_INDEX_RANGE_WAIT",
+ "T76x_TI_VERTEX_WAIT",
+ "T76x_TI_PCACHE_WAIT",
+ "T76x_TI_WRBUF_WAIT",
+ "T76x_TI_BUS_READ",
+ "T76x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T76x_TI_UTLB_HIT",
+ "T76x_TI_UTLB_NEW_MISS",
+ "T76x_TI_UTLB_REPLAY_FULL",
+ "T76x_TI_UTLB_REPLAY_MISS",
+ "T76x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T76x_FRAG_ACTIVE",
+ "T76x_FRAG_PRIMITIVES",
+ "T76x_FRAG_PRIMITIVES_DROPPED",
+ "T76x_FRAG_CYCLES_DESC",
+ "T76x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T76x_FRAG_CYCLES_VERT",
+ "T76x_FRAG_CYCLES_TRISETUP",
+ "T76x_FRAG_CYCLES_EZS_ACTIVE",
+ "T76x_FRAG_THREADS",
+ "T76x_FRAG_DUMMY_THREADS",
+ "T76x_FRAG_QUADS_RAST",
+ "T76x_FRAG_QUADS_EZS_TEST",
+ "T76x_FRAG_QUADS_EZS_KILLED",
+ "T76x_FRAG_THREADS_LZS_TEST",
+ "T76x_FRAG_THREADS_LZS_KILLED",
+ "T76x_FRAG_CYCLES_NO_TILE",
+ "T76x_FRAG_NUM_TILES",
+ "T76x_FRAG_TRANS_ELIM",
+ "T76x_COMPUTE_ACTIVE",
+ "T76x_COMPUTE_TASKS",
+ "T76x_COMPUTE_THREADS",
+ "T76x_COMPUTE_CYCLES_DESC",
+ "T76x_TRIPIPE_ACTIVE",
+ "T76x_ARITH_WORDS",
+ "T76x_ARITH_CYCLES_REG",
+ "T76x_ARITH_CYCLES_L0",
+ "T76x_ARITH_FRAG_DEPEND",
+ "T76x_LS_WORDS",
+ "T76x_LS_ISSUES",
+ "T76x_LS_REISSUE_ATTR",
+ "T76x_LS_REISSUES_VARY",
+ "T76x_LS_VARY_RV_MISS",
+ "T76x_LS_VARY_RV_HIT",
+ "T76x_LS_NO_UNPARK",
+ "T76x_TEX_WORDS",
+ "T76x_TEX_BUBBLES",
+ "T76x_TEX_WORDS_L0",
+ "T76x_TEX_WORDS_DESC",
+ "T76x_TEX_ISSUES",
+ "T76x_TEX_RECIRC_FMISS",
+ "T76x_TEX_RECIRC_DESC",
+ "T76x_TEX_RECIRC_MULTI",
+ "T76x_TEX_RECIRC_PMISS",
+ "T76x_TEX_RECIRC_CONF",
+ "T76x_LSC_READ_HITS",
+ "T76x_LSC_READ_OP",
+ "T76x_LSC_WRITE_HITS",
+ "T76x_LSC_WRITE_OP",
+ "T76x_LSC_ATOMIC_HITS",
+ "T76x_LSC_ATOMIC_OP",
+ "T76x_LSC_LINE_FETCHES",
+ "T76x_LSC_DIRTY_LINE",
+ "T76x_LSC_SNOOPS",
+ "T76x_AXI_TLB_STALL",
+ "T76x_AXI_TLB_MISS",
+ "T76x_AXI_TLB_TRANSACTION",
+ "T76x_LS_TLB_MISS",
+ "T76x_LS_TLB_HIT",
+ "T76x_AXI_BEATS_READ",
+ "T76x_AXI_BEATS_WRITTEN",
+
+ /* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T76x_MMU_HIT",
+ "T76x_MMU_NEW_MISS",
+ "T76x_MMU_REPLAY_FULL",
+ "T76x_MMU_REPLAY_MISS",
+ "T76x_MMU_TABLE_WALK",
+ "T76x_MMU_REQUESTS",
+ "",
+ "",
+ "T76x_UTLB_HIT",
+ "T76x_UTLB_NEW_MISS",
+ "T76x_UTLB_REPLAY_FULL",
+ "T76x_UTLB_REPLAY_MISS",
+ "T76x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T76x_L2_EXT_WRITE_BEATS",
+ "T76x_L2_EXT_READ_BEATS",
+ "T76x_L2_ANY_LOOKUP",
+ "T76x_L2_READ_LOOKUP",
+ "T76x_L2_SREAD_LOOKUP",
+ "T76x_L2_READ_REPLAY",
+ "T76x_L2_READ_SNOOP",
+ "T76x_L2_READ_HIT",
+ "T76x_L2_CLEAN_MISS",
+ "T76x_L2_WRITE_LOOKUP",
+ "T76x_L2_SWRITE_LOOKUP",
+ "T76x_L2_WRITE_REPLAY",
+ "T76x_L2_WRITE_SNOOP",
+ "T76x_L2_WRITE_HIT",
+ "T76x_L2_EXT_READ_FULL",
+ "",
+ "T76x_L2_EXT_WRITE_FULL",
+ "T76x_L2_EXT_R_W_HAZARD",
+ "T76x_L2_EXT_READ",
+ "T76x_L2_EXT_READ_LINE",
+ "T76x_L2_EXT_WRITE",
+ "T76x_L2_EXT_WRITE_LINE",
+ "T76x_L2_EXT_WRITE_SMALL",
+ "T76x_L2_EXT_BARRIER",
+ "T76x_L2_EXT_AR_STALL",
+ "T76x_L2_EXT_R_BUF_FULL",
+ "T76x_L2_EXT_RD_BUF_FULL",
+ "T76x_L2_EXT_R_RAW",
+ "T76x_L2_EXT_W_STALL",
+ "T76x_L2_EXT_W_BUF_FULL",
+ "T76x_L2_EXT_R_BUF_FULL",
+ "T76x_L2_TAG_HAZARD",
+ "T76x_L2_SNOOP_FULL",
+ "T76x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t82x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T82x_MESSAGES_SENT",
+ "T82x_MESSAGES_RECEIVED",
+ "T82x_GPU_ACTIVE",
+ "T82x_IRQ_ACTIVE",
+ "T82x_JS0_JOBS",
+ "T82x_JS0_TASKS",
+ "T82x_JS0_ACTIVE",
+ "",
+ "T82x_JS0_WAIT_READ",
+ "T82x_JS0_WAIT_ISSUE",
+ "T82x_JS0_WAIT_DEPEND",
+ "T82x_JS0_WAIT_FINISH",
+ "T82x_JS1_JOBS",
+ "T82x_JS1_TASKS",
+ "T82x_JS1_ACTIVE",
+ "",
+ "T82x_JS1_WAIT_READ",
+ "T82x_JS1_WAIT_ISSUE",
+ "T82x_JS1_WAIT_DEPEND",
+ "T82x_JS1_WAIT_FINISH",
+ "T82x_JS2_JOBS",
+ "T82x_JS2_TASKS",
+ "T82x_JS2_ACTIVE",
+ "",
+ "T82x_JS2_WAIT_READ",
+ "T82x_JS2_WAIT_ISSUE",
+ "T82x_JS2_WAIT_DEPEND",
+ "T82x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Tiler */
+ "",
+ "",
+ "",
+ "T82x_TI_JOBS_PROCESSED",
+ "T82x_TI_TRIANGLES",
+ "T82x_TI_QUADS",
+ "T82x_TI_POLYGONS",
+ "T82x_TI_POINTS",
+ "T82x_TI_LINES",
+ "T82x_TI_FRONT_FACING",
+ "T82x_TI_BACK_FACING",
+ "T82x_TI_PRIM_VISIBLE",
+ "T82x_TI_PRIM_CULLED",
+ "T82x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T82x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T82x_FRAG_ACTIVE",
+ "T82x_FRAG_PRIMITIVES",
+ "T82x_FRAG_PRIMITIVES_DROPPED",
+ "T82x_FRAG_CYCLES_DESC",
+ "T82x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T82x_FRAG_CYCLES_VERT",
+ "T82x_FRAG_CYCLES_TRISETUP",
+ "T82x_FRAG_CYCLES_EZS_ACTIVE",
+ "T82x_FRAG_THREADS",
+ "T82x_FRAG_DUMMY_THREADS",
+ "T82x_FRAG_QUADS_RAST",
+ "T82x_FRAG_QUADS_EZS_TEST",
+ "T82x_FRAG_QUADS_EZS_KILLED",
+ "T82x_FRAG_THREADS_LZS_TEST",
+ "T82x_FRAG_THREADS_LZS_KILLED",
+ "T82x_FRAG_CYCLES_NO_TILE",
+ "T82x_FRAG_NUM_TILES",
+ "T82x_FRAG_TRANS_ELIM",
+ "T82x_COMPUTE_ACTIVE",
+ "T82x_COMPUTE_TASKS",
+ "T82x_COMPUTE_THREADS",
+ "T82x_COMPUTE_CYCLES_DESC",
+ "T82x_TRIPIPE_ACTIVE",
+ "T82x_ARITH_WORDS",
+ "T82x_ARITH_CYCLES_REG",
+ "T82x_ARITH_CYCLES_L0",
+ "T82x_ARITH_FRAG_DEPEND",
+ "T82x_LS_WORDS",
+ "T82x_LS_ISSUES",
+ "T82x_LS_REISSUE_ATTR",
+ "T82x_LS_REISSUES_VARY",
+ "T82x_LS_VARY_RV_MISS",
+ "T82x_LS_VARY_RV_HIT",
+ "T82x_LS_NO_UNPARK",
+ "T82x_TEX_WORDS",
+ "T82x_TEX_BUBBLES",
+ "T82x_TEX_WORDS_L0",
+ "T82x_TEX_WORDS_DESC",
+ "T82x_TEX_ISSUES",
+ "T82x_TEX_RECIRC_FMISS",
+ "T82x_TEX_RECIRC_DESC",
+ "T82x_TEX_RECIRC_MULTI",
+ "T82x_TEX_RECIRC_PMISS",
+ "T82x_TEX_RECIRC_CONF",
+ "T82x_LSC_READ_HITS",
+ "T82x_LSC_READ_OP",
+ "T82x_LSC_WRITE_HITS",
+ "T82x_LSC_WRITE_OP",
+ "T82x_LSC_ATOMIC_HITS",
+ "T82x_LSC_ATOMIC_OP",
+ "T82x_LSC_LINE_FETCHES",
+ "T82x_LSC_DIRTY_LINE",
+ "T82x_LSC_SNOOPS",
+ "T82x_AXI_TLB_STALL",
+ "T82x_AXI_TLB_MISS",
+ "T82x_AXI_TLB_TRANSACTION",
+ "T82x_LS_TLB_MISS",
+ "T82x_LS_TLB_HIT",
+ "T82x_AXI_BEATS_READ",
+ "T82x_AXI_BEATS_WRITTEN",
+
+ /* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T82x_MMU_HIT",
+ "T82x_MMU_NEW_MISS",
+ "T82x_MMU_REPLAY_FULL",
+ "T82x_MMU_REPLAY_MISS",
+ "T82x_MMU_TABLE_WALK",
+ "T82x_MMU_REQUESTS",
+ "",
+ "",
+ "T82x_UTLB_HIT",
+ "T82x_UTLB_NEW_MISS",
+ "T82x_UTLB_REPLAY_FULL",
+ "T82x_UTLB_REPLAY_MISS",
+ "T82x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T82x_L2_EXT_WRITE_BEATS",
+ "T82x_L2_EXT_READ_BEATS",
+ "T82x_L2_ANY_LOOKUP",
+ "T82x_L2_READ_LOOKUP",
+ "T82x_L2_SREAD_LOOKUP",
+ "T82x_L2_READ_REPLAY",
+ "T82x_L2_READ_SNOOP",
+ "T82x_L2_READ_HIT",
+ "T82x_L2_CLEAN_MISS",
+ "T82x_L2_WRITE_LOOKUP",
+ "T82x_L2_SWRITE_LOOKUP",
+ "T82x_L2_WRITE_REPLAY",
+ "T82x_L2_WRITE_SNOOP",
+ "T82x_L2_WRITE_HIT",
+ "T82x_L2_EXT_READ_FULL",
+ "",
+ "T82x_L2_EXT_WRITE_FULL",
+ "T82x_L2_EXT_R_W_HAZARD",
+ "T82x_L2_EXT_READ",
+ "T82x_L2_EXT_READ_LINE",
+ "T82x_L2_EXT_WRITE",
+ "T82x_L2_EXT_WRITE_LINE",
+ "T82x_L2_EXT_WRITE_SMALL",
+ "T82x_L2_EXT_BARRIER",
+ "T82x_L2_EXT_AR_STALL",
+ "T82x_L2_EXT_R_BUF_FULL",
+ "T82x_L2_EXT_RD_BUF_FULL",
+ "T82x_L2_EXT_R_RAW",
+ "T82x_L2_EXT_W_STALL",
+ "T82x_L2_EXT_W_BUF_FULL",
+ "T82x_L2_EXT_R_BUF_FULL",
+ "T82x_L2_TAG_HAZARD",
+ "T82x_L2_SNOOP_FULL",
+ "T82x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t83x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T83x_MESSAGES_SENT",
+ "T83x_MESSAGES_RECEIVED",
+ "T83x_GPU_ACTIVE",
+ "T83x_IRQ_ACTIVE",
+ "T83x_JS0_JOBS",
+ "T83x_JS0_TASKS",
+ "T83x_JS0_ACTIVE",
+ "",
+ "T83x_JS0_WAIT_READ",
+ "T83x_JS0_WAIT_ISSUE",
+ "T83x_JS0_WAIT_DEPEND",
+ "T83x_JS0_WAIT_FINISH",
+ "T83x_JS1_JOBS",
+ "T83x_JS1_TASKS",
+ "T83x_JS1_ACTIVE",
+ "",
+ "T83x_JS1_WAIT_READ",
+ "T83x_JS1_WAIT_ISSUE",
+ "T83x_JS1_WAIT_DEPEND",
+ "T83x_JS1_WAIT_FINISH",
+ "T83x_JS2_JOBS",
+ "T83x_JS2_TASKS",
+ "T83x_JS2_ACTIVE",
+ "",
+ "T83x_JS2_WAIT_READ",
+ "T83x_JS2_WAIT_ISSUE",
+ "T83x_JS2_WAIT_DEPEND",
+ "T83x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Tiler */
+ "",
+ "",
+ "",
+ "T83x_TI_JOBS_PROCESSED",
+ "T83x_TI_TRIANGLES",
+ "T83x_TI_QUADS",
+ "T83x_TI_POLYGONS",
+ "T83x_TI_POINTS",
+ "T83x_TI_LINES",
+ "T83x_TI_FRONT_FACING",
+ "T83x_TI_BACK_FACING",
+ "T83x_TI_PRIM_VISIBLE",
+ "T83x_TI_PRIM_CULLED",
+ "T83x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T83x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T83x_FRAG_ACTIVE",
+ "T83x_FRAG_PRIMITIVES",
+ "T83x_FRAG_PRIMITIVES_DROPPED",
+ "T83x_FRAG_CYCLES_DESC",
+ "T83x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T83x_FRAG_CYCLES_VERT",
+ "T83x_FRAG_CYCLES_TRISETUP",
+ "T83x_FRAG_CYCLES_EZS_ACTIVE",
+ "T83x_FRAG_THREADS",
+ "T83x_FRAG_DUMMY_THREADS",
+ "T83x_FRAG_QUADS_RAST",
+ "T83x_FRAG_QUADS_EZS_TEST",
+ "T83x_FRAG_QUADS_EZS_KILLED",
+ "T83x_FRAG_THREADS_LZS_TEST",
+ "T83x_FRAG_THREADS_LZS_KILLED",
+ "T83x_FRAG_CYCLES_NO_TILE",
+ "T83x_FRAG_NUM_TILES",
+ "T83x_FRAG_TRANS_ELIM",
+ "T83x_COMPUTE_ACTIVE",
+ "T83x_COMPUTE_TASKS",
+ "T83x_COMPUTE_THREADS",
+ "T83x_COMPUTE_CYCLES_DESC",
+ "T83x_TRIPIPE_ACTIVE",
+ "T83x_ARITH_WORDS",
+ "T83x_ARITH_CYCLES_REG",
+ "T83x_ARITH_CYCLES_L0",
+ "T83x_ARITH_FRAG_DEPEND",
+ "T83x_LS_WORDS",
+ "T83x_LS_ISSUES",
+ "T83x_LS_REISSUE_ATTR",
+ "T83x_LS_REISSUES_VARY",
+ "T83x_LS_VARY_RV_MISS",
+ "T83x_LS_VARY_RV_HIT",
+ "T83x_LS_NO_UNPARK",
+ "T83x_TEX_WORDS",
+ "T83x_TEX_BUBBLES",
+ "T83x_TEX_WORDS_L0",
+ "T83x_TEX_WORDS_DESC",
+ "T83x_TEX_ISSUES",
+ "T83x_TEX_RECIRC_FMISS",
+ "T83x_TEX_RECIRC_DESC",
+ "T83x_TEX_RECIRC_MULTI",
+ "T83x_TEX_RECIRC_PMISS",
+ "T83x_TEX_RECIRC_CONF",
+ "T83x_LSC_READ_HITS",
+ "T83x_LSC_READ_OP",
+ "T83x_LSC_WRITE_HITS",
+ "T83x_LSC_WRITE_OP",
+ "T83x_LSC_ATOMIC_HITS",
+ "T83x_LSC_ATOMIC_OP",
+ "T83x_LSC_LINE_FETCHES",
+ "T83x_LSC_DIRTY_LINE",
+ "T83x_LSC_SNOOPS",
+ "T83x_AXI_TLB_STALL",
+ "T83x_AXI_TLB_MISS",
+ "T83x_AXI_TLB_TRANSACTION",
+ "T83x_LS_TLB_MISS",
+ "T83x_LS_TLB_HIT",
+ "T83x_AXI_BEATS_READ",
+ "T83x_AXI_BEATS_WRITTEN",
+
+ /* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T83x_MMU_HIT",
+ "T83x_MMU_NEW_MISS",
+ "T83x_MMU_REPLAY_FULL",
+ "T83x_MMU_REPLAY_MISS",
+ "T83x_MMU_TABLE_WALK",
+ "T83x_MMU_REQUESTS",
+ "",
+ "",
+ "T83x_UTLB_HIT",
+ "T83x_UTLB_NEW_MISS",
+ "T83x_UTLB_REPLAY_FULL",
+ "T83x_UTLB_REPLAY_MISS",
+ "T83x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T83x_L2_EXT_WRITE_BEATS",
+ "T83x_L2_EXT_READ_BEATS",
+ "T83x_L2_ANY_LOOKUP",
+ "T83x_L2_READ_LOOKUP",
+ "T83x_L2_SREAD_LOOKUP",
+ "T83x_L2_READ_REPLAY",
+ "T83x_L2_READ_SNOOP",
+ "T83x_L2_READ_HIT",
+ "T83x_L2_CLEAN_MISS",
+ "T83x_L2_WRITE_LOOKUP",
+ "T83x_L2_SWRITE_LOOKUP",
+ "T83x_L2_WRITE_REPLAY",
+ "T83x_L2_WRITE_SNOOP",
+ "T83x_L2_WRITE_HIT",
+ "T83x_L2_EXT_READ_FULL",
+ "",
+ "T83x_L2_EXT_WRITE_FULL",
+ "T83x_L2_EXT_R_W_HAZARD",
+ "T83x_L2_EXT_READ",
+ "T83x_L2_EXT_READ_LINE",
+ "T83x_L2_EXT_WRITE",
+ "T83x_L2_EXT_WRITE_LINE",
+ "T83x_L2_EXT_WRITE_SMALL",
+ "T83x_L2_EXT_BARRIER",
+ "T83x_L2_EXT_AR_STALL",
+ "T83x_L2_EXT_R_BUF_FULL",
+ "T83x_L2_EXT_RD_BUF_FULL",
+ "T83x_L2_EXT_R_RAW",
+ "T83x_L2_EXT_W_STALL",
+ "T83x_L2_EXT_W_BUF_FULL",
+ "T83x_L2_EXT_R_BUF_FULL",
+ "T83x_L2_TAG_HAZARD",
+ "T83x_L2_SNOOP_FULL",
+ "T83x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t86x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T86x_MESSAGES_SENT",
+ "T86x_MESSAGES_RECEIVED",
+ "T86x_GPU_ACTIVE",
+ "T86x_IRQ_ACTIVE",
+ "T86x_JS0_JOBS",
+ "T86x_JS0_TASKS",
+ "T86x_JS0_ACTIVE",
+ "",
+ "T86x_JS0_WAIT_READ",
+ "T86x_JS0_WAIT_ISSUE",
+ "T86x_JS0_WAIT_DEPEND",
+ "T86x_JS0_WAIT_FINISH",
+ "T86x_JS1_JOBS",
+ "T86x_JS1_TASKS",
+ "T86x_JS1_ACTIVE",
+ "",
+ "T86x_JS1_WAIT_READ",
+ "T86x_JS1_WAIT_ISSUE",
+ "T86x_JS1_WAIT_DEPEND",
+ "T86x_JS1_WAIT_FINISH",
+ "T86x_JS2_JOBS",
+ "T86x_JS2_TASKS",
+ "T86x_JS2_ACTIVE",
+ "",
+ "T86x_JS2_WAIT_READ",
+ "T86x_JS2_WAIT_ISSUE",
+ "T86x_JS2_WAIT_DEPEND",
+ "T86x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Tiler */
+ "",
+ "",
+ "",
+ "T86x_TI_JOBS_PROCESSED",
+ "T86x_TI_TRIANGLES",
+ "T86x_TI_QUADS",
+ "T86x_TI_POLYGONS",
+ "T86x_TI_POINTS",
+ "T86x_TI_LINES",
+ "T86x_TI_VCACHE_HIT",
+ "T86x_TI_VCACHE_MISS",
+ "T86x_TI_FRONT_FACING",
+ "T86x_TI_BACK_FACING",
+ "T86x_TI_PRIM_VISIBLE",
+ "T86x_TI_PRIM_CULLED",
+ "T86x_TI_PRIM_CLIPPED",
+ "T86x_TI_LEVEL0",
+ "T86x_TI_LEVEL1",
+ "T86x_TI_LEVEL2",
+ "T86x_TI_LEVEL3",
+ "T86x_TI_LEVEL4",
+ "T86x_TI_LEVEL5",
+ "T86x_TI_LEVEL6",
+ "T86x_TI_LEVEL7",
+ "T86x_TI_COMMAND_1",
+ "T86x_TI_COMMAND_2",
+ "T86x_TI_COMMAND_3",
+ "T86x_TI_COMMAND_4",
+ "T86x_TI_COMMAND_5_7",
+ "T86x_TI_COMMAND_8_15",
+ "T86x_TI_COMMAND_16_63",
+ "T86x_TI_COMMAND_64",
+ "T86x_TI_COMPRESS_IN",
+ "T86x_TI_COMPRESS_OUT",
+ "T86x_TI_COMPRESS_FLUSH",
+ "T86x_TI_TIMESTAMPS",
+ "T86x_TI_PCACHE_HIT",
+ "T86x_TI_PCACHE_MISS",
+ "T86x_TI_PCACHE_LINE",
+ "T86x_TI_PCACHE_STALL",
+ "T86x_TI_WRBUF_HIT",
+ "T86x_TI_WRBUF_MISS",
+ "T86x_TI_WRBUF_LINE",
+ "T86x_TI_WRBUF_PARTIAL",
+ "T86x_TI_WRBUF_STALL",
+ "T86x_TI_ACTIVE",
+ "T86x_TI_LOADING_DESC",
+ "T86x_TI_INDEX_WAIT",
+ "T86x_TI_INDEX_RANGE_WAIT",
+ "T86x_TI_VERTEX_WAIT",
+ "T86x_TI_PCACHE_WAIT",
+ "T86x_TI_WRBUF_WAIT",
+ "T86x_TI_BUS_READ",
+ "T86x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T86x_TI_UTLB_HIT",
+ "T86x_TI_UTLB_NEW_MISS",
+ "T86x_TI_UTLB_REPLAY_FULL",
+ "T86x_TI_UTLB_REPLAY_MISS",
+ "T86x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T86x_FRAG_ACTIVE",
+ "T86x_FRAG_PRIMITIVES",
+ "T86x_FRAG_PRIMITIVES_DROPPED",
+ "T86x_FRAG_CYCLES_DESC",
+ "T86x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T86x_FRAG_CYCLES_VERT",
+ "T86x_FRAG_CYCLES_TRISETUP",
+ "T86x_FRAG_CYCLES_EZS_ACTIVE",
+ "T86x_FRAG_THREADS",
+ "T86x_FRAG_DUMMY_THREADS",
+ "T86x_FRAG_QUADS_RAST",
+ "T86x_FRAG_QUADS_EZS_TEST",
+ "T86x_FRAG_QUADS_EZS_KILLED",
+ "T86x_FRAG_THREADS_LZS_TEST",
+ "T86x_FRAG_THREADS_LZS_KILLED",
+ "T86x_FRAG_CYCLES_NO_TILE",
+ "T86x_FRAG_NUM_TILES",
+ "T86x_FRAG_TRANS_ELIM",
+ "T86x_COMPUTE_ACTIVE",
+ "T86x_COMPUTE_TASKS",
+ "T86x_COMPUTE_THREADS",
+ "T86x_COMPUTE_CYCLES_DESC",
+ "T86x_TRIPIPE_ACTIVE",
+ "T86x_ARITH_WORDS",
+ "T86x_ARITH_CYCLES_REG",
+ "T86x_ARITH_CYCLES_L0",
+ "T86x_ARITH_FRAG_DEPEND",
+ "T86x_LS_WORDS",
+ "T86x_LS_ISSUES",
+ "T86x_LS_REISSUE_ATTR",
+ "T86x_LS_REISSUES_VARY",
+ "T86x_LS_VARY_RV_MISS",
+ "T86x_LS_VARY_RV_HIT",
+ "T86x_LS_NO_UNPARK",
+ "T86x_TEX_WORDS",
+ "T86x_TEX_BUBBLES",
+ "T86x_TEX_WORDS_L0",
+ "T86x_TEX_WORDS_DESC",
+ "T86x_TEX_ISSUES",
+ "T86x_TEX_RECIRC_FMISS",
+ "T86x_TEX_RECIRC_DESC",
+ "T86x_TEX_RECIRC_MULTI",
+ "T86x_TEX_RECIRC_PMISS",
+ "T86x_TEX_RECIRC_CONF",
+ "T86x_LSC_READ_HITS",
+ "T86x_LSC_READ_OP",
+ "T86x_LSC_WRITE_HITS",
+ "T86x_LSC_WRITE_OP",
+ "T86x_LSC_ATOMIC_HITS",
+ "T86x_LSC_ATOMIC_OP",
+ "T86x_LSC_LINE_FETCHES",
+ "T86x_LSC_DIRTY_LINE",
+ "T86x_LSC_SNOOPS",
+ "T86x_AXI_TLB_STALL",
+ "T86x_AXI_TLB_MISS",
+ "T86x_AXI_TLB_TRANSACTION",
+ "T86x_LS_TLB_MISS",
+ "T86x_LS_TLB_HIT",
+ "T86x_AXI_BEATS_READ",
+ "T86x_AXI_BEATS_WRITTEN",
+
+ /* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T86x_MMU_HIT",
+ "T86x_MMU_NEW_MISS",
+ "T86x_MMU_REPLAY_FULL",
+ "T86x_MMU_REPLAY_MISS",
+ "T86x_MMU_TABLE_WALK",
+ "T86x_MMU_REQUESTS",
+ "",
+ "",
+ "T86x_UTLB_HIT",
+ "T86x_UTLB_NEW_MISS",
+ "T86x_UTLB_REPLAY_FULL",
+ "T86x_UTLB_REPLAY_MISS",
+ "T86x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T86x_L2_EXT_WRITE_BEATS",
+ "T86x_L2_EXT_READ_BEATS",
+ "T86x_L2_ANY_LOOKUP",
+ "T86x_L2_READ_LOOKUP",
+ "T86x_L2_SREAD_LOOKUP",
+ "T86x_L2_READ_REPLAY",
+ "T86x_L2_READ_SNOOP",
+ "T86x_L2_READ_HIT",
+ "T86x_L2_CLEAN_MISS",
+ "T86x_L2_WRITE_LOOKUP",
+ "T86x_L2_SWRITE_LOOKUP",
+ "T86x_L2_WRITE_REPLAY",
+ "T86x_L2_WRITE_SNOOP",
+ "T86x_L2_WRITE_HIT",
+ "T86x_L2_EXT_READ_FULL",
+ "",
+ "T86x_L2_EXT_WRITE_FULL",
+ "T86x_L2_EXT_R_W_HAZARD",
+ "T86x_L2_EXT_READ",
+ "T86x_L2_EXT_READ_LINE",
+ "T86x_L2_EXT_WRITE",
+ "T86x_L2_EXT_WRITE_LINE",
+ "T86x_L2_EXT_WRITE_SMALL",
+ "T86x_L2_EXT_BARRIER",
+ "T86x_L2_EXT_AR_STALL",
+ "T86x_L2_EXT_R_BUF_FULL",
+ "T86x_L2_EXT_RD_BUF_FULL",
+ "T86x_L2_EXT_R_RAW",
+ "T86x_L2_EXT_W_STALL",
+ "T86x_L2_EXT_W_BUF_FULL",
+ "T86x_L2_EXT_R_BUF_FULL",
+ "T86x_L2_TAG_HAZARD",
+ "T86x_L2_SNOOP_FULL",
+ "T86x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t88x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T88x_MESSAGES_SENT",
+ "T88x_MESSAGES_RECEIVED",
+ "T88x_GPU_ACTIVE",
+ "T88x_IRQ_ACTIVE",
+ "T88x_JS0_JOBS",
+ "T88x_JS0_TASKS",
+ "T88x_JS0_ACTIVE",
+ "",
+ "T88x_JS0_WAIT_READ",
+ "T88x_JS0_WAIT_ISSUE",
+ "T88x_JS0_WAIT_DEPEND",
+ "T88x_JS0_WAIT_FINISH",
+ "T88x_JS1_JOBS",
+ "T88x_JS1_TASKS",
+ "T88x_JS1_ACTIVE",
+ "",
+ "T88x_JS1_WAIT_READ",
+ "T88x_JS1_WAIT_ISSUE",
+ "T88x_JS1_WAIT_DEPEND",
+ "T88x_JS1_WAIT_FINISH",
+ "T88x_JS2_JOBS",
+ "T88x_JS2_TASKS",
+ "T88x_JS2_ACTIVE",
+ "",
+ "T88x_JS2_WAIT_READ",
+ "T88x_JS2_WAIT_ISSUE",
+ "T88x_JS2_WAIT_DEPEND",
+ "T88x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Tiler */
+ "",
+ "",
+ "",
+ "T88x_TI_JOBS_PROCESSED",
+ "T88x_TI_TRIANGLES",
+ "T88x_TI_QUADS",
+ "T88x_TI_POLYGONS",
+ "T88x_TI_POINTS",
+ "T88x_TI_LINES",
+ "T88x_TI_VCACHE_HIT",
+ "T88x_TI_VCACHE_MISS",
+ "T88x_TI_FRONT_FACING",
+ "T88x_TI_BACK_FACING",
+ "T88x_TI_PRIM_VISIBLE",
+ "T88x_TI_PRIM_CULLED",
+ "T88x_TI_PRIM_CLIPPED",
+ "T88x_TI_LEVEL0",
+ "T88x_TI_LEVEL1",
+ "T88x_TI_LEVEL2",
+ "T88x_TI_LEVEL3",
+ "T88x_TI_LEVEL4",
+ "T88x_TI_LEVEL5",
+ "T88x_TI_LEVEL6",
+ "T88x_TI_LEVEL7",
+ "T88x_TI_COMMAND_1",
+ "T88x_TI_COMMAND_2",
+ "T88x_TI_COMMAND_3",
+ "T88x_TI_COMMAND_4",
+ "T88x_TI_COMMAND_5_7",
+ "T88x_TI_COMMAND_8_15",
+ "T88x_TI_COMMAND_16_63",
+ "T88x_TI_COMMAND_64",
+ "T88x_TI_COMPRESS_IN",
+ "T88x_TI_COMPRESS_OUT",
+ "T88x_TI_COMPRESS_FLUSH",
+ "T88x_TI_TIMESTAMPS",
+ "T88x_TI_PCACHE_HIT",
+ "T88x_TI_PCACHE_MISS",
+ "T88x_TI_PCACHE_LINE",
+ "T88x_TI_PCACHE_STALL",
+ "T88x_TI_WRBUF_HIT",
+ "T88x_TI_WRBUF_MISS",
+ "T88x_TI_WRBUF_LINE",
+ "T88x_TI_WRBUF_PARTIAL",
+ "T88x_TI_WRBUF_STALL",
+ "T88x_TI_ACTIVE",
+ "T88x_TI_LOADING_DESC",
+ "T88x_TI_INDEX_WAIT",
+ "T88x_TI_INDEX_RANGE_WAIT",
+ "T88x_TI_VERTEX_WAIT",
+ "T88x_TI_PCACHE_WAIT",
+ "T88x_TI_WRBUF_WAIT",
+ "T88x_TI_BUS_READ",
+ "T88x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T88x_TI_UTLB_HIT",
+ "T88x_TI_UTLB_NEW_MISS",
+ "T88x_TI_UTLB_REPLAY_FULL",
+ "T88x_TI_UTLB_REPLAY_MISS",
+ "T88x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T88x_FRAG_ACTIVE",
+ "T88x_FRAG_PRIMITIVES",
+ "T88x_FRAG_PRIMITIVES_DROPPED",
+ "T88x_FRAG_CYCLES_DESC",
+ "T88x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T88x_FRAG_CYCLES_VERT",
+ "T88x_FRAG_CYCLES_TRISETUP",
+ "T88x_FRAG_CYCLES_EZS_ACTIVE",
+ "T88x_FRAG_THREADS",
+ "T88x_FRAG_DUMMY_THREADS",
+ "T88x_FRAG_QUADS_RAST",
+ "T88x_FRAG_QUADS_EZS_TEST",
+ "T88x_FRAG_QUADS_EZS_KILLED",
+ "T88x_FRAG_THREADS_LZS_TEST",
+ "T88x_FRAG_THREADS_LZS_KILLED",
+ "T88x_FRAG_CYCLES_NO_TILE",
+ "T88x_FRAG_NUM_TILES",
+ "T88x_FRAG_TRANS_ELIM",
+ "T88x_COMPUTE_ACTIVE",
+ "T88x_COMPUTE_TASKS",
+ "T88x_COMPUTE_THREADS",
+ "T88x_COMPUTE_CYCLES_DESC",
+ "T88x_TRIPIPE_ACTIVE",
+ "T88x_ARITH_WORDS",
+ "T88x_ARITH_CYCLES_REG",
+ "T88x_ARITH_CYCLES_L0",
+ "T88x_ARITH_FRAG_DEPEND",
+ "T88x_LS_WORDS",
+ "T88x_LS_ISSUES",
+ "T88x_LS_REISSUE_ATTR",
+ "T88x_LS_REISSUES_VARY",
+ "T88x_LS_VARY_RV_MISS",
+ "T88x_LS_VARY_RV_HIT",
+ "T88x_LS_NO_UNPARK",
+ "T88x_TEX_WORDS",
+ "T88x_TEX_BUBBLES",
+ "T88x_TEX_WORDS_L0",
+ "T88x_TEX_WORDS_DESC",
+ "T88x_TEX_ISSUES",
+ "T88x_TEX_RECIRC_FMISS",
+ "T88x_TEX_RECIRC_DESC",
+ "T88x_TEX_RECIRC_MULTI",
+ "T88x_TEX_RECIRC_PMISS",
+ "T88x_TEX_RECIRC_CONF",
+ "T88x_LSC_READ_HITS",
+ "T88x_LSC_READ_OP",
+ "T88x_LSC_WRITE_HITS",
+ "T88x_LSC_WRITE_OP",
+ "T88x_LSC_ATOMIC_HITS",
+ "T88x_LSC_ATOMIC_OP",
+ "T88x_LSC_LINE_FETCHES",
+ "T88x_LSC_DIRTY_LINE",
+ "T88x_LSC_SNOOPS",
+ "T88x_AXI_TLB_STALL",
+ "T88x_AXI_TLB_MISS",
+ "T88x_AXI_TLB_TRANSACTION",
+ "T88x_LS_TLB_MISS",
+ "T88x_LS_TLB_HIT",
+ "T88x_AXI_BEATS_READ",
+ "T88x_AXI_BEATS_WRITTEN",
+
+ /* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T88x_MMU_HIT",
+ "T88x_MMU_NEW_MISS",
+ "T88x_MMU_REPLAY_FULL",
+ "T88x_MMU_REPLAY_MISS",
+ "T88x_MMU_TABLE_WALK",
+ "T88x_MMU_REQUESTS",
+ "",
+ "",
+ "T88x_UTLB_HIT",
+ "T88x_UTLB_NEW_MISS",
+ "T88x_UTLB_REPLAY_FULL",
+ "T88x_UTLB_REPLAY_MISS",
+ "T88x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T88x_L2_EXT_WRITE_BEATS",
+ "T88x_L2_EXT_READ_BEATS",
+ "T88x_L2_ANY_LOOKUP",
+ "T88x_L2_READ_LOOKUP",
+ "T88x_L2_SREAD_LOOKUP",
+ "T88x_L2_READ_REPLAY",
+ "T88x_L2_READ_SNOOP",
+ "T88x_L2_READ_HIT",
+ "T88x_L2_CLEAN_MISS",
+ "T88x_L2_WRITE_LOOKUP",
+ "T88x_L2_SWRITE_LOOKUP",
+ "T88x_L2_WRITE_REPLAY",
+ "T88x_L2_WRITE_SNOOP",
+ "T88x_L2_WRITE_HIT",
+ "T88x_L2_EXT_READ_FULL",
+ "",
+ "T88x_L2_EXT_WRITE_FULL",
+ "T88x_L2_EXT_R_W_HAZARD",
+ "T88x_L2_EXT_READ",
+ "T88x_L2_EXT_READ_LINE",
+ "T88x_L2_EXT_WRITE",
+ "T88x_L2_EXT_WRITE_LINE",
+ "T88x_L2_EXT_WRITE_SMALL",
+ "T88x_L2_EXT_BARRIER",
+ "T88x_L2_EXT_AR_STALL",
+ "T88x_L2_EXT_R_BUF_FULL",
+ "T88x_L2_EXT_RD_BUF_FULL",
+ "T88x_L2_EXT_R_RAW",
+ "T88x_L2_EXT_W_STALL",
+ "T88x_L2_EXT_W_BUF_FULL",
+ "T88x_L2_EXT_R_BUF_FULL",
+ "T88x_L2_TAG_HAZARD",
+ "T88x_L2_SNOOP_FULL",
+ "T88x_L2_REPLAY_FULL"
+};
+
+#include "mali_kbase_gator_hwcnt_names_tmix.h"
+
+#include "mali_kbase_gator_hwcnt_names_thex.h"
+
+#include "mali_kbase_gator_hwcnt_names_tsix.h"
+
+
+#ifdef MALI_INCLUDE_TKAX
+#include "mali_kbase_gator_hwcnt_names_tkax.h"
+#endif /* MALI_INCLUDE_TKAX */
+
+#endif
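
A note on the counter tables above, added editorially: each per-GPU array is laid out as four blocks of 64 slots, in the order Job Manager, Tiler, Shader Core, then L2/MMU (labelled Memory System in the tHEx/tMIx/tSIx variants), with empty strings marking slots that carry no counter on that GPU. Below is a minimal sketch of how such a table could be indexed, assuming that 4 x 64 layout; the helper and its name are hypothetical and not part of this patch.

#include <stddef.h>

#define HWCNT_BLOCK_SLOTS  64   /* slots per hardware block in the tables above */
#define HWCNT_NUM_BLOCKS    4   /* Job Manager, Tiler, Shader Core, L2/MMU */

/* Hypothetical lookup: return the name of counter 'idx' within 'block' for a
 * given table (e.g. hardware_counters_mali_t76x), or NULL for unused slots. */
static const char *hwcnt_name(const char * const *table, size_t table_len,
			      unsigned int block, unsigned int idx)
{
	size_t slot = (size_t)block * HWCNT_BLOCK_SLOTS + idx;

	if (block >= HWCNT_NUM_BLOCKS || idx >= HWCNT_BLOCK_SLOTS ||
	    slot >= table_len || table[slot][0] == '\0')
		return NULL;

	return table[slot];
}

Under that layout, for instance, block 3 slot 30 of hardware_counters_mali_t76x is "T76x_L2_EXT_WRITE_BEATS", so hwcnt_name(hardware_counters_mali_t76x, 256, 3, 30) would return that string.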
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_thex.h b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_thex.h
new file mode 100644
index 000000000000..15fd4efdc6ca
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_thex.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+
+static const char * const hardware_counters_mali_tHEx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "THEx_MESSAGES_SENT",
+ "THEx_MESSAGES_RECEIVED",
+ "THEx_GPU_ACTIVE",
+ "THEx_IRQ_ACTIVE",
+ "THEx_JS0_JOBS",
+ "THEx_JS0_TASKS",
+ "THEx_JS0_ACTIVE",
+ "",
+ "THEx_JS0_WAIT_READ",
+ "THEx_JS0_WAIT_ISSUE",
+ "THEx_JS0_WAIT_DEPEND",
+ "THEx_JS0_WAIT_FINISH",
+ "THEx_JS1_JOBS",
+ "THEx_JS1_TASKS",
+ "THEx_JS1_ACTIVE",
+ "",
+ "THEx_JS1_WAIT_READ",
+ "THEx_JS1_WAIT_ISSUE",
+ "THEx_JS1_WAIT_DEPEND",
+ "THEx_JS1_WAIT_FINISH",
+ "THEx_JS2_JOBS",
+ "THEx_JS2_TASKS",
+ "THEx_JS2_ACTIVE",
+ "",
+ "THEx_JS2_WAIT_READ",
+ "THEx_JS2_WAIT_ISSUE",
+ "THEx_JS2_WAIT_DEPEND",
+ "THEx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "THEx_TILER_ACTIVE",
+ "THEx_JOBS_PROCESSED",
+ "THEx_TRIANGLES",
+ "THEx_LINES",
+ "THEx_POINTS",
+ "THEx_FRONT_FACING",
+ "THEx_BACK_FACING",
+ "THEx_PRIM_VISIBLE",
+ "THEx_PRIM_CULLED",
+ "THEx_PRIM_CLIPPED",
+ "THEx_PRIM_SAT_CULLED",
+ "THEx_BIN_ALLOC_INIT",
+ "THEx_BIN_ALLOC_OVERFLOW",
+ "THEx_BUS_READ",
+ "",
+ "THEx_BUS_WRITE",
+ "THEx_LOADING_DESC",
+ "THEx_IDVS_POS_SHAD_REQ",
+ "THEx_IDVS_POS_SHAD_WAIT",
+ "THEx_IDVS_POS_SHAD_STALL",
+ "THEx_IDVS_POS_FIFO_FULL",
+ "THEx_PREFETCH_STALL",
+ "THEx_VCACHE_HIT",
+ "THEx_VCACHE_MISS",
+ "THEx_VCACHE_LINE_WAIT",
+ "THEx_VFETCH_POS_READ_WAIT",
+ "THEx_VFETCH_VERTEX_WAIT",
+ "THEx_VFETCH_STALL",
+ "THEx_PRIMASSY_STALL",
+ "THEx_BBOX_GEN_STALL",
+ "THEx_IDVS_VBU_HIT",
+ "THEx_IDVS_VBU_MISS",
+ "THEx_IDVS_VBU_LINE_DEALLOCATE",
+ "THEx_IDVS_VAR_SHAD_REQ",
+ "THEx_IDVS_VAR_SHAD_STALL",
+ "THEx_BINNER_STALL",
+ "THEx_ITER_STALL",
+ "THEx_COMPRESS_MISS",
+ "THEx_COMPRESS_STALL",
+ "THEx_PCACHE_HIT",
+ "THEx_PCACHE_MISS",
+ "THEx_PCACHE_MISS_STALL",
+ "THEx_PCACHE_EVICT_STALL",
+ "THEx_PMGR_PTR_WR_STALL",
+ "THEx_PMGR_PTR_RD_STALL",
+ "THEx_PMGR_CMD_WR_STALL",
+ "THEx_WRBUF_ACTIVE",
+ "THEx_WRBUF_HIT",
+ "THEx_WRBUF_MISS",
+ "THEx_WRBUF_NO_FREE_LINE_STALL",
+ "THEx_WRBUF_NO_AXI_ID_STALL",
+ "THEx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "THEx_UTLB_TRANS",
+ "THEx_UTLB_TRANS_HIT",
+ "THEx_UTLB_TRANS_STALL",
+ "THEx_UTLB_TRANS_MISS_DELAY",
+ "THEx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "THEx_FRAG_ACTIVE",
+ "THEx_FRAG_PRIMITIVES",
+ "THEx_FRAG_PRIM_RAST",
+ "THEx_FRAG_FPK_ACTIVE",
+ "THEx_FRAG_STARVING",
+ "THEx_FRAG_WARPS",
+ "THEx_FRAG_PARTIAL_WARPS",
+ "THEx_FRAG_QUADS_RAST",
+ "THEx_FRAG_QUADS_EZS_TEST",
+ "THEx_FRAG_QUADS_EZS_UPDATE",
+ "THEx_FRAG_QUADS_EZS_KILL",
+ "THEx_FRAG_LZS_TEST",
+ "THEx_FRAG_LZS_KILL",
+ "",
+ "THEx_FRAG_PTILES",
+ "THEx_FRAG_TRANS_ELIM",
+ "THEx_QUAD_FPK_KILLER",
+ "",
+ "THEx_COMPUTE_ACTIVE",
+ "THEx_COMPUTE_TASKS",
+ "THEx_COMPUTE_WARPS",
+ "THEx_COMPUTE_STARVING",
+ "THEx_EXEC_CORE_ACTIVE",
+ "THEx_EXEC_ACTIVE",
+ "THEx_EXEC_INSTR_COUNT",
+ "THEx_EXEC_INSTR_DIVERGED",
+ "THEx_EXEC_INSTR_STARVING",
+ "THEx_ARITH_INSTR_SINGLE_FMA",
+ "THEx_ARITH_INSTR_DOUBLE",
+ "THEx_ARITH_INSTR_MSG",
+ "THEx_ARITH_INSTR_MSG_ONLY",
+ "THEx_TEX_INSTR",
+ "THEx_TEX_INSTR_MIPMAP",
+ "THEx_TEX_INSTR_COMPRESSED",
+ "THEx_TEX_INSTR_3D",
+ "THEx_TEX_INSTR_TRILINEAR",
+ "THEx_TEX_COORD_ISSUE",
+ "THEx_TEX_COORD_STALL",
+ "THEx_TEX_STARVE_CACHE",
+ "THEx_TEX_STARVE_FILTER",
+ "THEx_LS_MEM_READ_FULL",
+ "THEx_LS_MEM_READ_SHORT",
+ "THEx_LS_MEM_WRITE_FULL",
+ "THEx_LS_MEM_WRITE_SHORT",
+ "THEx_LS_MEM_ATOMIC",
+ "THEx_VARY_INSTR",
+ "THEx_VARY_SLOT_32",
+ "THEx_VARY_SLOT_16",
+ "THEx_ATTR_INSTR",
+ "THEx_ARITH_INSTR_FP_MUL",
+ "THEx_BEATS_RD_FTC",
+ "THEx_BEATS_RD_FTC_EXT",
+ "THEx_BEATS_RD_LSC",
+ "THEx_BEATS_RD_LSC_EXT",
+ "THEx_BEATS_RD_TEX",
+ "THEx_BEATS_RD_TEX_EXT",
+ "THEx_BEATS_RD_OTHER",
+ "THEx_BEATS_WR_LSC",
+ "THEx_BEATS_WR_TIB",
+ "",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "THEx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "THEx_L2_RD_MSG_IN",
+ "THEx_L2_RD_MSG_IN_STALL",
+ "THEx_L2_WR_MSG_IN",
+ "THEx_L2_WR_MSG_IN_STALL",
+ "THEx_L2_SNP_MSG_IN",
+ "THEx_L2_SNP_MSG_IN_STALL",
+ "THEx_L2_RD_MSG_OUT",
+ "THEx_L2_RD_MSG_OUT_STALL",
+ "THEx_L2_WR_MSG_OUT",
+ "THEx_L2_ANY_LOOKUP",
+ "THEx_L2_READ_LOOKUP",
+ "THEx_L2_WRITE_LOOKUP",
+ "THEx_L2_EXT_SNOOP_LOOKUP",
+ "THEx_L2_EXT_READ",
+ "THEx_L2_EXT_READ_NOSNP",
+ "THEx_L2_EXT_READ_UNIQUE",
+ "THEx_L2_EXT_READ_BEATS",
+ "THEx_L2_EXT_AR_STALL",
+ "THEx_L2_EXT_AR_CNT_Q1",
+ "THEx_L2_EXT_AR_CNT_Q2",
+ "THEx_L2_EXT_AR_CNT_Q3",
+ "THEx_L2_EXT_RRESP_0_127",
+ "THEx_L2_EXT_RRESP_128_191",
+ "THEx_L2_EXT_RRESP_192_255",
+ "THEx_L2_EXT_RRESP_256_319",
+ "THEx_L2_EXT_RRESP_320_383",
+ "THEx_L2_EXT_WRITE",
+ "THEx_L2_EXT_WRITE_NOSNP_FULL",
+ "THEx_L2_EXT_WRITE_NOSNP_PTL",
+ "THEx_L2_EXT_WRITE_SNP_FULL",
+ "THEx_L2_EXT_WRITE_SNP_PTL",
+ "THEx_L2_EXT_WRITE_BEATS",
+ "THEx_L2_EXT_W_STALL",
+ "THEx_L2_EXT_AW_CNT_Q1",
+ "THEx_L2_EXT_AW_CNT_Q2",
+ "THEx_L2_EXT_AW_CNT_Q3",
+ "THEx_L2_EXT_SNOOP",
+ "THEx_L2_EXT_SNOOP_STALL",
+ "THEx_L2_EXT_SNOOP_RESP_CLEAN",
+ "THEx_L2_EXT_SNOOP_RESP_DATA",
+ "THEx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_THEX_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tmix.h b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tmix.h
new file mode 100644
index 000000000000..8a215f723570
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tmix.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+
+static const char * const hardware_counters_mali_tMIx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_MESSAGES_SENT",
+ "TMIx_MESSAGES_RECEIVED",
+ "TMIx_GPU_ACTIVE",
+ "TMIx_IRQ_ACTIVE",
+ "TMIx_JS0_JOBS",
+ "TMIx_JS0_TASKS",
+ "TMIx_JS0_ACTIVE",
+ "",
+ "TMIx_JS0_WAIT_READ",
+ "TMIx_JS0_WAIT_ISSUE",
+ "TMIx_JS0_WAIT_DEPEND",
+ "TMIx_JS0_WAIT_FINISH",
+ "TMIx_JS1_JOBS",
+ "TMIx_JS1_TASKS",
+ "TMIx_JS1_ACTIVE",
+ "",
+ "TMIx_JS1_WAIT_READ",
+ "TMIx_JS1_WAIT_ISSUE",
+ "TMIx_JS1_WAIT_DEPEND",
+ "TMIx_JS1_WAIT_FINISH",
+ "TMIx_JS2_JOBS",
+ "TMIx_JS2_TASKS",
+ "TMIx_JS2_ACTIVE",
+ "",
+ "TMIx_JS2_WAIT_READ",
+ "TMIx_JS2_WAIT_ISSUE",
+ "TMIx_JS2_WAIT_DEPEND",
+ "TMIx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_TILER_ACTIVE",
+ "TMIx_JOBS_PROCESSED",
+ "TMIx_TRIANGLES",
+ "TMIx_LINES",
+ "TMIx_POINTS",
+ "TMIx_FRONT_FACING",
+ "TMIx_BACK_FACING",
+ "TMIx_PRIM_VISIBLE",
+ "TMIx_PRIM_CULLED",
+ "TMIx_PRIM_CLIPPED",
+ "TMIx_PRIM_SAT_CULLED",
+ "TMIx_BIN_ALLOC_INIT",
+ "TMIx_BIN_ALLOC_OVERFLOW",
+ "TMIx_BUS_READ",
+ "",
+ "TMIx_BUS_WRITE",
+ "TMIx_LOADING_DESC",
+ "TMIx_IDVS_POS_SHAD_REQ",
+ "TMIx_IDVS_POS_SHAD_WAIT",
+ "TMIx_IDVS_POS_SHAD_STALL",
+ "TMIx_IDVS_POS_FIFO_FULL",
+ "TMIx_PREFETCH_STALL",
+ "TMIx_VCACHE_HIT",
+ "TMIx_VCACHE_MISS",
+ "TMIx_VCACHE_LINE_WAIT",
+ "TMIx_VFETCH_POS_READ_WAIT",
+ "TMIx_VFETCH_VERTEX_WAIT",
+ "TMIx_VFETCH_STALL",
+ "TMIx_PRIMASSY_STALL",
+ "TMIx_BBOX_GEN_STALL",
+ "TMIx_IDVS_VBU_HIT",
+ "TMIx_IDVS_VBU_MISS",
+ "TMIx_IDVS_VBU_LINE_DEALLOCATE",
+ "TMIx_IDVS_VAR_SHAD_REQ",
+ "TMIx_IDVS_VAR_SHAD_STALL",
+ "TMIx_BINNER_STALL",
+ "TMIx_ITER_STALL",
+ "TMIx_COMPRESS_MISS",
+ "TMIx_COMPRESS_STALL",
+ "TMIx_PCACHE_HIT",
+ "TMIx_PCACHE_MISS",
+ "TMIx_PCACHE_MISS_STALL",
+ "TMIx_PCACHE_EVICT_STALL",
+ "TMIx_PMGR_PTR_WR_STALL",
+ "TMIx_PMGR_PTR_RD_STALL",
+ "TMIx_PMGR_CMD_WR_STALL",
+ "TMIx_WRBUF_ACTIVE",
+ "TMIx_WRBUF_HIT",
+ "TMIx_WRBUF_MISS",
+ "TMIx_WRBUF_NO_FREE_LINE_STALL",
+ "TMIx_WRBUF_NO_AXI_ID_STALL",
+ "TMIx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TMIx_UTLB_TRANS",
+ "TMIx_UTLB_TRANS_HIT",
+ "TMIx_UTLB_TRANS_STALL",
+ "TMIx_UTLB_TRANS_MISS_DELAY",
+ "TMIx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_FRAG_ACTIVE",
+ "TMIx_FRAG_PRIMITIVES",
+ "TMIx_FRAG_PRIM_RAST",
+ "TMIx_FRAG_FPK_ACTIVE",
+ "TMIx_FRAG_STARVING",
+ "TMIx_FRAG_WARPS",
+ "TMIx_FRAG_PARTIAL_WARPS",
+ "TMIx_FRAG_QUADS_RAST",
+ "TMIx_FRAG_QUADS_EZS_TEST",
+ "TMIx_FRAG_QUADS_EZS_UPDATE",
+ "TMIx_FRAG_QUADS_EZS_KILL",
+ "TMIx_FRAG_LZS_TEST",
+ "TMIx_FRAG_LZS_KILL",
+ "",
+ "TMIx_FRAG_PTILES",
+ "TMIx_FRAG_TRANS_ELIM",
+ "TMIx_QUAD_FPK_KILLER",
+ "",
+ "TMIx_COMPUTE_ACTIVE",
+ "TMIx_COMPUTE_TASKS",
+ "TMIx_COMPUTE_WARPS",
+ "TMIx_COMPUTE_STARVING",
+ "TMIx_EXEC_CORE_ACTIVE",
+ "TMIx_EXEC_ACTIVE",
+ "TMIx_EXEC_INSTR_COUNT",
+ "TMIx_EXEC_INSTR_DIVERGED",
+ "TMIx_EXEC_INSTR_STARVING",
+ "TMIx_ARITH_INSTR_SINGLE_FMA",
+ "TMIx_ARITH_INSTR_DOUBLE",
+ "TMIx_ARITH_INSTR_MSG",
+ "TMIx_ARITH_INSTR_MSG_ONLY",
+ "TMIx_TEX_INSTR",
+ "TMIx_TEX_INSTR_MIPMAP",
+ "TMIx_TEX_INSTR_COMPRESSED",
+ "TMIx_TEX_INSTR_3D",
+ "TMIx_TEX_INSTR_TRILINEAR",
+ "TMIx_TEX_COORD_ISSUE",
+ "TMIx_TEX_COORD_STALL",
+ "TMIx_TEX_STARVE_CACHE",
+ "TMIx_TEX_STARVE_FILTER",
+ "TMIx_LS_MEM_READ_FULL",
+ "TMIx_LS_MEM_READ_SHORT",
+ "TMIx_LS_MEM_WRITE_FULL",
+ "TMIx_LS_MEM_WRITE_SHORT",
+ "TMIx_LS_MEM_ATOMIC",
+ "TMIx_VARY_INSTR",
+ "TMIx_VARY_SLOT_32",
+ "TMIx_VARY_SLOT_16",
+ "TMIx_ATTR_INSTR",
+ "TMIx_ARITH_INSTR_FP_MUL",
+ "TMIx_BEATS_RD_FTC",
+ "TMIx_BEATS_RD_FTC_EXT",
+ "TMIx_BEATS_RD_LSC",
+ "TMIx_BEATS_RD_LSC_EXT",
+ "TMIx_BEATS_RD_TEX",
+ "TMIx_BEATS_RD_TEX_EXT",
+ "TMIx_BEATS_RD_OTHER",
+ "TMIx_BEATS_WR_LSC",
+ "TMIx_BEATS_WR_TIB",
+ "",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TMIx_L2_RD_MSG_IN",
+ "TMIx_L2_RD_MSG_IN_STALL",
+ "TMIx_L2_WR_MSG_IN",
+ "TMIx_L2_WR_MSG_IN_STALL",
+ "TMIx_L2_SNP_MSG_IN",
+ "TMIx_L2_SNP_MSG_IN_STALL",
+ "TMIx_L2_RD_MSG_OUT",
+ "TMIx_L2_RD_MSG_OUT_STALL",
+ "TMIx_L2_WR_MSG_OUT",
+ "TMIx_L2_ANY_LOOKUP",
+ "TMIx_L2_READ_LOOKUP",
+ "TMIx_L2_WRITE_LOOKUP",
+ "TMIx_L2_EXT_SNOOP_LOOKUP",
+ "TMIx_L2_EXT_READ",
+ "TMIx_L2_EXT_READ_NOSNP",
+ "TMIx_L2_EXT_READ_UNIQUE",
+ "TMIx_L2_EXT_READ_BEATS",
+ "TMIx_L2_EXT_AR_STALL",
+ "TMIx_L2_EXT_AR_CNT_Q1",
+ "TMIx_L2_EXT_AR_CNT_Q2",
+ "TMIx_L2_EXT_AR_CNT_Q3",
+ "TMIx_L2_EXT_RRESP_0_127",
+ "TMIx_L2_EXT_RRESP_128_191",
+ "TMIx_L2_EXT_RRESP_192_255",
+ "TMIx_L2_EXT_RRESP_256_319",
+ "TMIx_L2_EXT_RRESP_320_383",
+ "TMIx_L2_EXT_WRITE",
+ "TMIx_L2_EXT_WRITE_NOSNP_FULL",
+ "TMIx_L2_EXT_WRITE_NOSNP_PTL",
+ "TMIx_L2_EXT_WRITE_SNP_FULL",
+ "TMIx_L2_EXT_WRITE_SNP_PTL",
+ "TMIx_L2_EXT_WRITE_BEATS",
+ "TMIx_L2_EXT_W_STALL",
+ "TMIx_L2_EXT_AW_CNT_Q1",
+ "TMIx_L2_EXT_AW_CNT_Q2",
+ "TMIx_L2_EXT_AW_CNT_Q3",
+ "TMIx_L2_EXT_SNOOP",
+ "TMIx_L2_EXT_SNOOP_STALL",
+ "TMIx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TMIx_L2_EXT_SNOOP_RESP_DATA",
+ "TMIx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tsix.h b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tsix.h
new file mode 100644
index 000000000000..fb6a1437a1f6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gator_hwcnt_names_tsix.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+
+static const char * const hardware_counters_mali_tSIx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TSIx_MESSAGES_SENT",
+ "TSIx_MESSAGES_RECEIVED",
+ "TSIx_GPU_ACTIVE",
+ "TSIx_IRQ_ACTIVE",
+ "TSIx_JS0_JOBS",
+ "TSIx_JS0_TASKS",
+ "TSIx_JS0_ACTIVE",
+ "",
+ "TSIx_JS0_WAIT_READ",
+ "TSIx_JS0_WAIT_ISSUE",
+ "TSIx_JS0_WAIT_DEPEND",
+ "TSIx_JS0_WAIT_FINISH",
+ "TSIx_JS1_JOBS",
+ "TSIx_JS1_TASKS",
+ "TSIx_JS1_ACTIVE",
+ "",
+ "TSIx_JS1_WAIT_READ",
+ "TSIx_JS1_WAIT_ISSUE",
+ "TSIx_JS1_WAIT_DEPEND",
+ "TSIx_JS1_WAIT_FINISH",
+ "TSIx_JS2_JOBS",
+ "TSIx_JS2_TASKS",
+ "TSIx_JS2_ACTIVE",
+ "",
+ "TSIx_JS2_WAIT_READ",
+ "TSIx_JS2_WAIT_ISSUE",
+ "TSIx_JS2_WAIT_DEPEND",
+ "TSIx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TSIx_TILER_ACTIVE",
+ "TSIx_JOBS_PROCESSED",
+ "TSIx_TRIANGLES",
+ "TSIx_LINES",
+ "TSIx_POINTS",
+ "TSIx_FRONT_FACING",
+ "TSIx_BACK_FACING",
+ "TSIx_PRIM_VISIBLE",
+ "TSIx_PRIM_CULLED",
+ "TSIx_PRIM_CLIPPED",
+ "TSIx_PRIM_SAT_CULLED",
+ "TSIx_BIN_ALLOC_INIT",
+ "TSIx_BIN_ALLOC_OVERFLOW",
+ "TSIx_BUS_READ",
+ "",
+ "TSIx_BUS_WRITE",
+ "TSIx_LOADING_DESC",
+ "TSIx_IDVS_POS_SHAD_REQ",
+ "TSIx_IDVS_POS_SHAD_WAIT",
+ "TSIx_IDVS_POS_SHAD_STALL",
+ "TSIx_IDVS_POS_FIFO_FULL",
+ "TSIx_PREFETCH_STALL",
+ "TSIx_VCACHE_HIT",
+ "TSIx_VCACHE_MISS",
+ "TSIx_VCACHE_LINE_WAIT",
+ "TSIx_VFETCH_POS_READ_WAIT",
+ "TSIx_VFETCH_VERTEX_WAIT",
+ "TSIx_VFETCH_STALL",
+ "TSIx_PRIMASSY_STALL",
+ "TSIx_BBOX_GEN_STALL",
+ "TSIx_IDVS_VBU_HIT",
+ "TSIx_IDVS_VBU_MISS",
+ "TSIx_IDVS_VBU_LINE_DEALLOCATE",
+ "TSIx_IDVS_VAR_SHAD_REQ",
+ "TSIx_IDVS_VAR_SHAD_STALL",
+ "TSIx_BINNER_STALL",
+ "TSIx_ITER_STALL",
+ "TSIx_COMPRESS_MISS",
+ "TSIx_COMPRESS_STALL",
+ "TSIx_PCACHE_HIT",
+ "TSIx_PCACHE_MISS",
+ "TSIx_PCACHE_MISS_STALL",
+ "TSIx_PCACHE_EVICT_STALL",
+ "TSIx_PMGR_PTR_WR_STALL",
+ "TSIx_PMGR_PTR_RD_STALL",
+ "TSIx_PMGR_CMD_WR_STALL",
+ "TSIx_WRBUF_ACTIVE",
+ "TSIx_WRBUF_HIT",
+ "TSIx_WRBUF_MISS",
+ "TSIx_WRBUF_NO_FREE_LINE_STALL",
+ "TSIx_WRBUF_NO_AXI_ID_STALL",
+ "TSIx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TSIx_UTLB_TRANS",
+ "TSIx_UTLB_TRANS_HIT",
+ "TSIx_UTLB_TRANS_STALL",
+ "TSIx_UTLB_TRANS_MISS_DELAY",
+ "TSIx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TSIx_FRAG_ACTIVE",
+ "TSIx_FRAG_PRIMITIVES",
+ "TSIx_FRAG_PRIM_RAST",
+ "TSIx_FRAG_FPK_ACTIVE",
+ "TSIx_FRAG_STARVING",
+ "TSIx_FRAG_WARPS",
+ "TSIx_FRAG_PARTIAL_WARPS",
+ "TSIx_FRAG_QUADS_RAST",
+ "TSIx_FRAG_QUADS_EZS_TEST",
+ "TSIx_FRAG_QUADS_EZS_UPDATE",
+ "TSIx_FRAG_QUADS_EZS_KILL",
+ "TSIx_FRAG_LZS_TEST",
+ "TSIx_FRAG_LZS_KILL",
+ "",
+ "TSIx_FRAG_PTILES",
+ "TSIx_FRAG_TRANS_ELIM",
+ "TSIx_QUAD_FPK_KILLER",
+ "",
+ "TSIx_COMPUTE_ACTIVE",
+ "TSIx_COMPUTE_TASKS",
+ "TSIx_COMPUTE_WARPS",
+ "TSIx_COMPUTE_STARVING",
+ "TSIx_EXEC_CORE_ACTIVE",
+ "TSIx_EXEC_ACTIVE",
+ "TSIx_EXEC_INSTR_COUNT",
+ "TSIx_EXEC_INSTR_DIVERGED",
+ "TSIx_EXEC_INSTR_STARVING",
+ "TSIx_ARITH_INSTR_SINGLE_FMA",
+ "TSIx_ARITH_INSTR_DOUBLE",
+ "TSIx_ARITH_INSTR_MSG",
+ "TSIx_ARITH_INSTR_MSG_ONLY",
+ "TSIx_TEX_MSGI_NUM_QUADS",
+ "TSIx_TEX_DFCH_NUM_PASSES",
+ "TSIx_TEX_DFCH_NUM_PASSES_MISS",
+ "TSIx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+ "TSIx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+ "TSIx_TEX_TFCH_NUM_LINES_FETCHED",
+ "TSIx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+ "TSIx_TEX_TFCH_NUM_OPERATIONS",
+ "TSIx_TEX_FILT_NUM_OPERATIONS",
+ "TSIx_LS_MEM_READ_FULL",
+ "TSIx_LS_MEM_READ_SHORT",
+ "TSIx_LS_MEM_WRITE_FULL",
+ "TSIx_LS_MEM_WRITE_SHORT",
+ "TSIx_LS_MEM_ATOMIC",
+ "TSIx_VARY_INSTR",
+ "TSIx_VARY_SLOT_32",
+ "TSIx_VARY_SLOT_16",
+ "TSIx_ATTR_INSTR",
+ "TSIx_ARITH_INSTR_FP_MUL",
+ "TSIx_BEATS_RD_FTC",
+ "TSIx_BEATS_RD_FTC_EXT",
+ "TSIx_BEATS_RD_LSC",
+ "TSIx_BEATS_RD_LSC_EXT",
+ "TSIx_BEATS_RD_TEX",
+ "TSIx_BEATS_RD_TEX_EXT",
+ "TSIx_BEATS_RD_OTHER",
+ "TSIx_BEATS_WR_LSC",
+ "TSIx_BEATS_WR_TIB",
+ "",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TSIx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TSIx_L2_RD_MSG_IN",
+ "TSIx_L2_RD_MSG_IN_STALL",
+ "TSIx_L2_WR_MSG_IN",
+ "TSIx_L2_WR_MSG_IN_STALL",
+ "TSIx_L2_SNP_MSG_IN",
+ "TSIx_L2_SNP_MSG_IN_STALL",
+ "TSIx_L2_RD_MSG_OUT",
+ "TSIx_L2_RD_MSG_OUT_STALL",
+ "TSIx_L2_WR_MSG_OUT",
+ "TSIx_L2_ANY_LOOKUP",
+ "TSIx_L2_READ_LOOKUP",
+ "TSIx_L2_WRITE_LOOKUP",
+ "TSIx_L2_EXT_SNOOP_LOOKUP",
+ "TSIx_L2_EXT_READ",
+ "TSIx_L2_EXT_READ_NOSNP",
+ "TSIx_L2_EXT_READ_UNIQUE",
+ "TSIx_L2_EXT_READ_BEATS",
+ "TSIx_L2_EXT_AR_STALL",
+ "TSIx_L2_EXT_AR_CNT_Q1",
+ "TSIx_L2_EXT_AR_CNT_Q2",
+ "TSIx_L2_EXT_AR_CNT_Q3",
+ "TSIx_L2_EXT_RRESP_0_127",
+ "TSIx_L2_EXT_RRESP_128_191",
+ "TSIx_L2_EXT_RRESP_192_255",
+ "TSIx_L2_EXT_RRESP_256_319",
+ "TSIx_L2_EXT_RRESP_320_383",
+ "TSIx_L2_EXT_WRITE",
+ "TSIx_L2_EXT_WRITE_NOSNP_FULL",
+ "TSIx_L2_EXT_WRITE_NOSNP_PTL",
+ "TSIx_L2_EXT_WRITE_SNP_FULL",
+ "TSIx_L2_EXT_WRITE_SNP_PTL",
+ "TSIx_L2_EXT_WRITE_BEATS",
+ "TSIx_L2_EXT_W_STALL",
+ "TSIx_L2_EXT_AW_CNT_Q1",
+ "TSIx_L2_EXT_AW_CNT_Q2",
+ "TSIx_L2_EXT_AW_CNT_Q3",
+ "TSIx_L2_EXT_SNOOP",
+ "TSIx_L2_EXT_SNOOP_STALL",
+ "TSIx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TSIx_L2_EXT_SNOOP_RESP_DATA",
+ "TSIx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpu_id.h b/drivers/gpu/arm_gpu/mali_kbase_gpu_id.h
new file mode 100644
index 000000000000..9763f9673c8f
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpu_id.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#ifndef _KBASE_GPU_ID_H_
+#define _KBASE_GPU_ID_H_
+
+/* GPU_ID register */
+#define GPU_ID_VERSION_STATUS_SHIFT 0
+#define GPU_ID_VERSION_MINOR_SHIFT 4
+#define GPU_ID_VERSION_MAJOR_SHIFT 12
+#define GPU_ID_VERSION_PRODUCT_ID_SHIFT 16
+#define GPU_ID_VERSION_STATUS (0xF << GPU_ID_VERSION_STATUS_SHIFT)
+#define GPU_ID_VERSION_MINOR (0xFF << GPU_ID_VERSION_MINOR_SHIFT)
+#define GPU_ID_VERSION_MAJOR (0xF << GPU_ID_VERSION_MAJOR_SHIFT)
+#define GPU_ID_VERSION_PRODUCT_ID (0xFFFF << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
+
+/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */
+#define GPU_ID_PI_T60X 0x6956
+#define GPU_ID_PI_T62X 0x0620
+#define GPU_ID_PI_T76X 0x0750
+#define GPU_ID_PI_T72X 0x0720
+#define GPU_ID_PI_TFRX 0x0880
+#define GPU_ID_PI_T86X 0x0860
+#define GPU_ID_PI_T82X 0x0820
+#define GPU_ID_PI_T83X 0x0830
+
+/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */
+#define GPU_ID_PI_NEW_FORMAT_START 0x1000
+#define GPU_ID_IS_NEW_FORMAT(product_id) ((product_id) != GPU_ID_PI_T60X && \
+ (product_id) >= \
+ GPU_ID_PI_NEW_FORMAT_START)
+
+#define GPU_ID2_VERSION_STATUS_SHIFT 0
+#define GPU_ID2_VERSION_MINOR_SHIFT 4
+#define GPU_ID2_VERSION_MAJOR_SHIFT 12
+#define GPU_ID2_PRODUCT_MAJOR_SHIFT 16
+#define GPU_ID2_ARCH_REV_SHIFT 20
+#define GPU_ID2_ARCH_MINOR_SHIFT 24
+#define GPU_ID2_ARCH_MAJOR_SHIFT 28
+#define GPU_ID2_VERSION_STATUS (0xF << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR (0xFF << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR (0xF << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR (0xF << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV (0xF << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR (0xF << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR (0xF << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
+#define GPU_ID2_VERSION (GPU_ID2_VERSION_MAJOR | \
+ GPU_ID2_VERSION_MINOR | \
+ GPU_ID2_VERSION_STATUS)
+
+/* Helper macro to create a partial GPU_ID (new format) that defines
+ a product ignoring its version. */
+#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
+ (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ ((arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
+ ((arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
+ ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Helper macro to create a partial GPU_ID (new format) that specifies the
+ revision (major, minor, status) of a product */
+#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
+ (((version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
+ ((version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
+ ((version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+
+/* Helper macro to create a complete GPU_ID (new format) */
+#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
+ version_major, version_minor, version_status) \
+ (GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \
+ product_major) | \
+ GPU_ID2_VERSION_MAKE(version_major, version_minor, \
+ version_status))
+
+/* Helper macro to create a partial GPU_ID (new format) that identifies
+ a particular GPU model by its arch_major and product_major. */
+#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
+ (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Strip off the non-relevant bits from a product_id value and make it suitable
+ for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
+ model. */
+#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
+ (((product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
+ GPU_ID2_PRODUCT_MODEL)
+
+#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6u, 0)
+#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6u, 1)
+#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7u, 0)
+#ifdef MALI_INCLUDE_TDVX
+#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7u, 3)
+#endif /* MALI_INCLUDE_TDVX */
+#ifdef MALI_INCLUDE_TGOX
+#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7u, 2)
+#endif /* MALI_INCLUDE_TGOX */
+#ifdef MALI_INCLUDE_TKAX
+#define GPU_ID2_PRODUCT_TKAX GPU_ID2_MODEL_MAKE(9u, 0)
+#endif /* MALI_INCLUDE_TKAX */
+#ifdef MALI_INCLUDE_TTRX
+#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(10u, 0)
+#endif /* MALI_INCLUDE_TTRX */
+
+/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
+#define GPU_ID_S_15DEV0 0x1
+#define GPU_ID_S_EAC 0x2
+
+/* Helper macro to create a GPU_ID assuming valid values for id, major,
+ minor, status */
+#define GPU_ID_MAKE(id, major, minor, status) \
+ (((id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
+ ((major) << GPU_ID_VERSION_MAJOR_SHIFT) | \
+ ((minor) << GPU_ID_VERSION_MINOR_SHIFT) | \
+ ((status) << GPU_ID_VERSION_STATUS_SHIFT))
+
+#endif /* _KBASE_GPU_ID_H_ */
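
As an editorial illustration of how the two ID formats above fit together, the sketch below checks whether a raw GPU_ID register value identifies a THEx product. The function is hypothetical and assumes only the macros defined in this header.

#include <linux/types.h>
#include "mali_kbase_gpu_id.h"

static bool example_gpu_is_thex(u32 gpu_id)
{
	u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
			 GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	/* Old-format products (PRODUCT_ID below 0x1000, or the T60X value)
	 * can never match a new-format model. */
	if (!GPU_ID_IS_NEW_FORMAT(product_id))
		return false;

	/* Only the arch_major and product_major fields take part in the
	 * comparison, as GPU_ID2_MODEL_MATCH_VALUE masks everything else. */
	return GPU_ID2_MODEL_MATCH_VALUE(product_id) == GPU_ID2_PRODUCT_THEX;
}

The same pattern works for the other new-format models by substituting the corresponding GPU_ID2_PRODUCT_* constant.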
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.c
new file mode 100644
index 000000000000..6df0a1cb1264
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.c
@@ -0,0 +1,97 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+/** Show callback for the @c gpu_memory debugfs file.
+ *
+ * This function is called to get the contents of the @c gpu_memory debugfs
+ * file. This is a report of current gpu memory usage.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if the data is successfully printed to the debugfs entry file,
+ *         -1 if an error is encountered
+ */
+
+static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
+{
+ struct list_head *entry;
+ const struct list_head *kbdev_list;
+
+ kbdev_list = kbase_dev_list_get();
+ list_for_each(entry, kbdev_list) {
+ struct kbase_device *kbdev = NULL;
+ struct kbasep_kctx_list_element *element;
+
+ kbdev = list_entry(entry, struct kbase_device, entry);
+ /* output the total memory usage for this device */
+ seq_printf(sfile, "%-16s %10u\n",
+ kbdev->devname,
+ atomic_read(&(kbdev->memdev.used_pages)));
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry(element, &kbdev->kctx_list, link) {
+ /* output the memory usage for each kctx
+ * opened on this device */
+ seq_printf(sfile, " %s-0x%p %10u\n",
+ "kctx",
+ element->kctx,
+ atomic_read(&(element->kctx->used_pages)));
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ kbase_dev_list_put(kbdev_list);
+ return 0;
+}
+
+/*
+ * File operations related to debugfs entry for gpu_memory
+ */
+static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbasep_gpu_memory_seq_show, NULL);
+}
+
+static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
+ .open = kbasep_gpu_memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Initialize debugfs entry for gpu_memory
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("gpu_memory", S_IRUGO,
+ kbdev->mali_debugfs_directory, NULL,
+ &kbasep_gpu_memory_debugfs_fops);
+ return;
+}
+
+#else
+/*
+ * Stub functions for when debugfs is disabled
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+ return;
+}
+#endif
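
For reference, the two seq_printf() calls above emit one summary line per device followed by one indented line per open context. With made-up page counts and a made-up context address, the output looks roughly like this (the exact debugfs path depends on how mali_debugfs_directory is set up elsewhere in the driver, commonly /sys/kernel/debug/mali0/gpu_memory):

mali0                 23040
  kctx-0x0000000012345678       8192
  kctx-0x00000000deadbeef      14848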
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.h
new file mode 100644
index 000000000000..7045693eb910
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpu_memory_debugfs.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpu_memory_debugfs.h
+ * Header file for gpu_memory entry in debugfs
+ *
+ */
+
+#ifndef _KBASE_GPU_MEMORY_DEBUGFS_H
+#define _KBASE_GPU_MEMORY_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Initialize gpu_memory debugfs entry
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev);
+
+#endif /*_KBASE_GPU_MEMORY_DEBUGFS_H*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpuprops.c b/drivers/gpu/arm_gpu/mali_kbase_gpuprops.c
new file mode 100644
index 000000000000..baf3c491c719
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpuprops.c
@@ -0,0 +1,514 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel property query APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gpuprops.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+#include "mali_kbase_ioctl.h"
+#include <linux/clk.h>
+
+/**
+ * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield.
+ * @value: The value from which to extract bits.
+ * @offset: The first bit to extract (0 being the LSB).
+ * @size: The number of bits to extract.
+ *
+ * Context: @offset + @size <= 32.
+ *
+ * Return: Bits [@offset, @offset + @size) from @value.
+ */
+/* from mali_cdsb.h */
+#define KBASE_UBFX32(value, offset, size) \
+ (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
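+/* Worked example (editorial illustration, not part of the original patch):
+ * KBASE_UBFX32(0xABCD1234, 16, 16) == 0xABCD, i.e. bits [31:16] of the value,
+ * which is the same extraction used for the product_id field in
+ * kbase_gpuprops_update_core_props_gpu_id() further down in this file. */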
+
+int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props)
+{
+ kbase_gpu_clk_speed_func get_gpu_speed_mhz;
+ u32 gpu_speed_mhz;
+ int rc = 1;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kbase_props);
+
+ /* Current GPU speed is requested from the system integrator via the GPU_SPEED_FUNC function.
+ * If that function fails, or the function is not provided by the system integrator, we report the maximum
+ * GPU speed as specified by GPU_FREQ_KHZ_MAX.
+ */
+ get_gpu_speed_mhz = (kbase_gpu_clk_speed_func) GPU_SPEED_FUNC;
+ if (get_gpu_speed_mhz != NULL) {
+ rc = get_gpu_speed_mhz(&gpu_speed_mhz);
+#ifdef CONFIG_MALI_DEBUG
+ /* Issue a warning message when the reported GPU speed falls outside the min/max range */
+ if (rc == 0) {
+ u32 gpu_speed_khz = gpu_speed_mhz * 1000;
+
+ if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min ||
+ gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
+ dev_warn(kctx->kbdev->dev, "GPU speed is outside the min/max range (got %lu kHz, min %lu kHz, max %lu kHz)\n",
+ (unsigned long)gpu_speed_khz,
+ (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
+ (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
+ }
+#endif /* CONFIG_MALI_DEBUG */
+ }
+ if (kctx->kbdev->clock) {
+ gpu_speed_mhz = clk_get_rate(kctx->kbdev->clock) / 1000000;
+ rc = 0;
+ }
+ if (rc != 0)
+ gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
+
+ kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;
+
+ memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));
+
+ /* Before API 8.2, callers expected L3 cache info here, which was always 0 */
+ if (kctx->api_version < KBASE_API_VERSION(8, 2))
+ kbase_props->props.raw_props.suspend_size = 0;
+
+ return 0;
+}
+
+static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
+{
+ struct mali_base_gpu_coherent_group *current_group;
+ u64 group_present;
+ u64 group_mask;
+ u64 first_set, first_set_prev;
+ u32 num_groups = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != props);
+
+ props->coherency_info.coherency = props->raw_props.mem_features;
+ props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present);
+
+ if (props->coherency_info.coherency & GROUPS_L2_COHERENT) {
+ /* Group is l2 coherent */
+ group_present = props->raw_props.l2_present;
+ } else {
+ /* Group is l1 coherent */
+ group_present = props->raw_props.shader_present;
+ }
+
+ /*
+ * The coherent group mask can be computed from the l2 present
+ * register.
+ *
+ * For the coherent group n:
+ * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
+ * where first_set is group_present with only its nth set-bit kept
+ * (i.e. the position from where a new group starts).
+ *
+ * For instance if the groups are l2 coherent and l2_present=0x0..01111:
+ * The first mask is:
+ * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
+ * = (0x0..010 - 1) & ~(0x0..01 - 1)
+ * = 0x0..00f
+ * The second mask is:
+ * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
+ * = (0x0..100 - 1) & ~(0x0..010 - 1)
+ * = 0x0..0f0
+ * And so on until all the bits from group_present have been cleared
+ * (i.e. there is no group left).
+ */
+
+ current_group = props->coherency_info.group;
+ first_set = group_present & ~(group_present - 1);
+
+ while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) {
+ group_present -= first_set; /* Clear the current group bit */
+ first_set_prev = first_set;
+
+ first_set = group_present & ~(group_present - 1);
+ group_mask = (first_set - 1) & ~(first_set_prev - 1);
+
+ /* Populate the coherent_group structure for each group */
+ current_group->core_mask = group_mask & props->raw_props.shader_present;
+ current_group->num_cores = hweight64(current_group->core_mask);
+
+ num_groups++;
+ current_group++;
+ }
+
+ if (group_present != 0)
+ pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);
+
+ props->coherency_info.num_groups = num_groups;
+}
+
+/**
+ * kbase_gpuprops_get_props - Get the GPU configuration
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values from the GPU configuration
+ * registers. Only the raw properties are filled in this function
+ */
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+ struct kbase_gpuprops_regdump regdump;
+ int i;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != gpu_props);
+
+ /* Dump relevant registers */
+ kbase_backend_gpuprops_get(kbdev, &regdump);
+
+ gpu_props->raw_props.gpu_id = regdump.gpu_id;
+ gpu_props->raw_props.tiler_features = regdump.tiler_features;
+ gpu_props->raw_props.mem_features = regdump.mem_features;
+ gpu_props->raw_props.mmu_features = regdump.mmu_features;
+ gpu_props->raw_props.l2_features = regdump.l2_features;
+ gpu_props->raw_props.suspend_size = regdump.suspend_size;
+
+ gpu_props->raw_props.as_present = regdump.as_present;
+ gpu_props->raw_props.js_present = regdump.js_present;
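+ /* The *_PRESENT core masks are 64-bit values split across _LO/_HI
+ * register pairs; recombine the two halves here.
+ */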
+ gpu_props->raw_props.shader_present =
+ ((u64) regdump.shader_present_hi << 32) +
+ regdump.shader_present_lo;
+ gpu_props->raw_props.tiler_present =
+ ((u64) regdump.tiler_present_hi << 32) +
+ regdump.tiler_present_lo;
+ gpu_props->raw_props.l2_present =
+ ((u64) regdump.l2_present_hi << 32) +
+ regdump.l2_present_lo;
+#ifdef CONFIG_MALI_CORESTACK
+ gpu_props->raw_props.stack_present =
+ ((u64) regdump.stack_present_hi << 32) +
+ regdump.stack_present_lo;
+#else /* CONFIG_MALI_CORESTACK */
+ gpu_props->raw_props.stack_present = 0;
+#endif /* CONFIG_MALI_CORESTACK */
+
+ for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+ gpu_props->raw_props.js_features[i] = regdump.js_features[i];
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->raw_props.texture_features[i] = regdump.texture_features[i];
+
+ gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size;
+ gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
+ gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
+ gpu_props->raw_props.thread_features = regdump.thread_features;
+}
+
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props)
+{
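+ /* GPU_ID register layout: bits [3:0] version status, [11:4] minor
+ * revision, [15:12] major revision, [31:16] product ID.
+ */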
+ gpu_props->core_props.version_status =
+ KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4);
+ gpu_props->core_props.minor_revision =
+ KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8);
+ gpu_props->core_props.major_revision =
+ KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4);
+ gpu_props->core_props.product_id =
+ KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16);
+}
+
+/**
+ * kbase_gpuprops_calculate_props - Calculate the derived properties
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values derived from the GPU
+ * configuration registers
+ */
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+ int i;
+
+ /* Populate the base_gpu_props structure */
+ kbase_gpuprops_update_core_props_gpu_id(gpu_props);
+ gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+ gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
+
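+ /* L2_FEATURES packs log2(cache line size) in bits [7:0] and
+ * log2(cache size) in bits [23:16].
+ */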
+ gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8);
+ gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+ /* The number-of-L2-slices field was added to the MEM_FEATURES register
+ * in t76x. The code below assumes that on older GPUs the reserved bits
+ * read back as zero. */
+ gpu_props->l2_props.num_l2_slices =
+ KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1;
+
+ gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6);
+ gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4);
+
+ if (gpu_props->raw_props.thread_max_threads == 0)
+ gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT;
+ else
+ gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads;
+
+ if (gpu_props->raw_props.thread_max_workgroup_size == 0)
+ gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT;
+ else
+ gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size;
+
+ if (gpu_props->raw_props.thread_max_barrier_size == 0)
+ gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT;
+ else
+ gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size;
+
+ gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
+ gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
+ gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
+ gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2);
+
+ /* If values are not specified, then use defaults */
+ if (gpu_props->thread_props.max_registers == 0) {
+ gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT;
+ gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT;
+ gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT;
+ }
+ /* Initialize the coherent_group structure for each group */
+ kbase_gpuprops_construct_coherent_groups(gpu_props);
+}
+
+void kbase_gpuprops_set(struct kbase_device *kbdev)
+{
+ struct kbase_gpu_props *gpu_props;
+ struct gpu_raw_gpu_props *raw;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ gpu_props = &kbdev->gpu_props;
+ raw = &gpu_props->props.raw_props;
+
+ /* Initialize the base_gpu_props structure from the hardware */
+ kbase_gpuprops_get_props(&gpu_props->props, kbdev);
+
+ /* Populate the derived properties */
+ kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);
+
+ /* Populate kbase-only fields */
+ gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
+ gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);
+
+ gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
+
+ gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
+ gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);
+
+ gpu_props->num_cores = hweight64(raw->shader_present);
+ gpu_props->num_core_groups = hweight64(raw->l2_present);
+ gpu_props->num_address_spaces = hweight32(raw->as_present);
+ gpu_props->num_job_slots = hweight32(raw->js_present);
+}
+
+void kbase_gpuprops_set_features(struct kbase_device *kbdev)
+{
+ base_gpu_props *gpu_props;
+ struct kbase_gpuprops_regdump regdump;
+
+ gpu_props = &kbdev->gpu_props.props;
+
+ /* Dump relevant registers */
+ kbase_backend_gpuprops_get_features(kbdev, &regdump);
+
+ /*
+ * Copy the raw value from the register, later this will get turned
+ * into the selected coherency mode.
+ * Additionally, add non-coherent mode, as this is always supported.
+ */
+ gpu_props->raw_props.coherency_mode = regdump.coherency_features |
+ COHERENCY_FEATURE_BIT(COHERENCY_NONE);
+}
+
+static struct {
+ u32 type;
+ size_t offset;
+ int size;
+} gpu_property_mapping[] = {
+#define PROP(name, member) \
+ {KBASE_GPUPROP_ ## name, offsetof(struct mali_base_gpu_props, member), \
+ sizeof(((struct mali_base_gpu_props *)0)->member)}
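+ /* Each entry records a property ID together with the offset and size of
+ * the corresponding field in struct mali_base_gpu_props, so that
+ * kbase_gpuprops_populate_user_buffer() can serialize it generically.
+ */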
+ PROP(PRODUCT_ID, core_props.product_id),
+ PROP(VERSION_STATUS, core_props.version_status),
+ PROP(MINOR_REVISION, core_props.minor_revision),
+ PROP(MAJOR_REVISION, core_props.major_revision),
+ PROP(GPU_SPEED_MHZ, core_props.gpu_speed_mhz),
+ PROP(GPU_FREQ_KHZ_MAX, core_props.gpu_freq_khz_max),
+ PROP(GPU_FREQ_KHZ_MIN, core_props.gpu_freq_khz_min),
+ PROP(LOG2_PROGRAM_COUNTER_SIZE, core_props.log2_program_counter_size),
+ PROP(TEXTURE_FEATURES_0, core_props.texture_features[0]),
+ PROP(TEXTURE_FEATURES_1, core_props.texture_features[1]),
+ PROP(TEXTURE_FEATURES_2, core_props.texture_features[2]),
+ PROP(GPU_AVAILABLE_MEMORY_SIZE, core_props.gpu_available_memory_size),
+
+ PROP(L2_LOG2_LINE_SIZE, l2_props.log2_line_size),
+ PROP(L2_LOG2_CACHE_SIZE, l2_props.log2_cache_size),
+ PROP(L2_NUM_L2_SLICES, l2_props.num_l2_slices),
+
+ PROP(TILER_BIN_SIZE_BYTES, tiler_props.bin_size_bytes),
+ PROP(TILER_MAX_ACTIVE_LEVELS, tiler_props.max_active_levels),
+
+ PROP(MAX_THREADS, thread_props.max_threads),
+ PROP(MAX_WORKGROUP_SIZE, thread_props.max_workgroup_size),
+ PROP(MAX_BARRIER_SIZE, thread_props.max_barrier_size),
+ PROP(MAX_REGISTERS, thread_props.max_registers),
+ PROP(MAX_TASK_QUEUE, thread_props.max_task_queue),
+ PROP(MAX_THREAD_GROUP_SPLIT, thread_props.max_thread_group_split),
+ PROP(IMPL_TECH, thread_props.impl_tech),
+
+ PROP(RAW_SHADER_PRESENT, raw_props.shader_present),
+ PROP(RAW_TILER_PRESENT, raw_props.tiler_present),
+ PROP(RAW_L2_PRESENT, raw_props.l2_present),
+ PROP(RAW_STACK_PRESENT, raw_props.stack_present),
+ PROP(RAW_L2_FEATURES, raw_props.l2_features),
+ PROP(RAW_SUSPEND_SIZE, raw_props.suspend_size),
+ PROP(RAW_MEM_FEATURES, raw_props.mem_features),
+ PROP(RAW_MMU_FEATURES, raw_props.mmu_features),
+ PROP(RAW_AS_PRESENT, raw_props.as_present),
+ PROP(RAW_JS_PRESENT, raw_props.js_present),
+ PROP(RAW_JS_FEATURES_0, raw_props.js_features[0]),
+ PROP(RAW_JS_FEATURES_1, raw_props.js_features[1]),
+ PROP(RAW_JS_FEATURES_2, raw_props.js_features[2]),
+ PROP(RAW_JS_FEATURES_3, raw_props.js_features[3]),
+ PROP(RAW_JS_FEATURES_4, raw_props.js_features[4]),
+ PROP(RAW_JS_FEATURES_5, raw_props.js_features[5]),
+ PROP(RAW_JS_FEATURES_6, raw_props.js_features[6]),
+ PROP(RAW_JS_FEATURES_7, raw_props.js_features[7]),
+ PROP(RAW_JS_FEATURES_8, raw_props.js_features[8]),
+ PROP(RAW_JS_FEATURES_9, raw_props.js_features[9]),
+ PROP(RAW_JS_FEATURES_10, raw_props.js_features[10]),
+ PROP(RAW_JS_FEATURES_11, raw_props.js_features[11]),
+ PROP(RAW_JS_FEATURES_12, raw_props.js_features[12]),
+ PROP(RAW_JS_FEATURES_13, raw_props.js_features[13]),
+ PROP(RAW_JS_FEATURES_14, raw_props.js_features[14]),
+ PROP(RAW_JS_FEATURES_15, raw_props.js_features[15]),
+ PROP(RAW_TILER_FEATURES, raw_props.tiler_features),
+ PROP(RAW_TEXTURE_FEATURES_0, raw_props.texture_features[0]),
+ PROP(RAW_TEXTURE_FEATURES_1, raw_props.texture_features[1]),
+ PROP(RAW_TEXTURE_FEATURES_2, raw_props.texture_features[2]),
+ PROP(RAW_GPU_ID, raw_props.gpu_id),
+ PROP(RAW_THREAD_MAX_THREADS, raw_props.thread_max_threads),
+ PROP(RAW_THREAD_MAX_WORKGROUP_SIZE,
+ raw_props.thread_max_workgroup_size),
+ PROP(RAW_THREAD_MAX_BARRIER_SIZE, raw_props.thread_max_barrier_size),
+ PROP(RAW_THREAD_FEATURES, raw_props.thread_features),
+ PROP(RAW_COHERENCY_MODE, raw_props.coherency_mode),
+
+ PROP(COHERENCY_NUM_GROUPS, coherency_info.num_groups),
+ PROP(COHERENCY_NUM_CORE_GROUPS, coherency_info.num_core_groups),
+ PROP(COHERENCY_COHERENCY, coherency_info.coherency),
+ PROP(COHERENCY_GROUP_0, coherency_info.group[0].core_mask),
+ PROP(COHERENCY_GROUP_1, coherency_info.group[1].core_mask),
+ PROP(COHERENCY_GROUP_2, coherency_info.group[2].core_mask),
+ PROP(COHERENCY_GROUP_3, coherency_info.group[3].core_mask),
+ PROP(COHERENCY_GROUP_4, coherency_info.group[4].core_mask),
+ PROP(COHERENCY_GROUP_5, coherency_info.group[5].core_mask),
+ PROP(COHERENCY_GROUP_6, coherency_info.group[6].core_mask),
+ PROP(COHERENCY_GROUP_7, coherency_info.group[7].core_mask),
+ PROP(COHERENCY_GROUP_8, coherency_info.group[8].core_mask),
+ PROP(COHERENCY_GROUP_9, coherency_info.group[9].core_mask),
+ PROP(COHERENCY_GROUP_10, coherency_info.group[10].core_mask),
+ PROP(COHERENCY_GROUP_11, coherency_info.group[11].core_mask),
+ PROP(COHERENCY_GROUP_12, coherency_info.group[12].core_mask),
+ PROP(COHERENCY_GROUP_13, coherency_info.group[13].core_mask),
+ PROP(COHERENCY_GROUP_14, coherency_info.group[14].core_mask),
+ PROP(COHERENCY_GROUP_15, coherency_info.group[15].core_mask),
+
+#undef PROP
+};
+
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev)
+{
+ struct kbase_gpu_props *kprops = &kbdev->gpu_props;
+ struct mali_base_gpu_props *props = &kprops->props;
+ u32 count = ARRAY_SIZE(gpu_property_mapping);
+ u32 i;
+ u32 size = 0;
+ u8 *p;
+
+ for (i = 0; i < count; i++) {
+ /* 4 bytes for the ID, and the size of the property */
+ size += 4 + gpu_property_mapping[i].size;
+ }
+
+ kprops->prop_buffer_size = size;
+ kprops->prop_buffer = kmalloc(size, GFP_KERNEL);
+
+ if (!kprops->prop_buffer) {
+ kprops->prop_buffer_size = 0;
+ return -ENOMEM;
+ }
+
+ p = kprops->prop_buffer;
+
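+ /* The WRITE_U* helpers below emit each value one byte at a time,
+ * least-significant byte first, regardless of host endianness.
+ */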
+#define WRITE_U8(v) (*p++ = (v) & 0xFF)
+#define WRITE_U16(v) do { WRITE_U8(v); WRITE_U8((v) >> 8); } while (0)
+#define WRITE_U32(v) do { WRITE_U16(v); WRITE_U16((v) >> 16); } while (0)
+#define WRITE_U64(v) do { WRITE_U32(v); WRITE_U32((v) >> 32); } while (0)
+
+ for (i = 0; i < count; i++) {
+ u32 type = gpu_property_mapping[i].type;
+ u8 type_size;
+ void *field = ((u8 *)props) + gpu_property_mapping[i].offset;
+
+ switch (gpu_property_mapping[i].size) {
+ case 1:
+ type_size = KBASE_GPUPROP_VALUE_SIZE_U8;
+ break;
+ case 2:
+ type_size = KBASE_GPUPROP_VALUE_SIZE_U16;
+ break;
+ case 4:
+ type_size = KBASE_GPUPROP_VALUE_SIZE_U32;
+ break;
+ case 8:
+ type_size = KBASE_GPUPROP_VALUE_SIZE_U64;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "Invalid gpu_property_mapping type=%d size=%d",
+ type, gpu_property_mapping[i].size);
+ return -EINVAL;
+ }
+
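+ /* Each property starts with a 32-bit header word: the property ID in
+ * bits [31:2] and the encoded value size in bits [1:0].
+ */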
+ WRITE_U32((type<<2) | type_size);
+
+ switch (type_size) {
+ case KBASE_GPUPROP_VALUE_SIZE_U8:
+ WRITE_U8(*((u8 *)field));
+ break;
+ case KBASE_GPUPROP_VALUE_SIZE_U16:
+ WRITE_U16(*((u16 *)field));
+ break;
+ case KBASE_GPUPROP_VALUE_SIZE_U32:
+ WRITE_U32(*((u32 *)field));
+ break;
+ case KBASE_GPUPROP_VALUE_SIZE_U64:
+ WRITE_U64(*((u64 *)field));
+ break;
+ default: /* Cannot be reached */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpuprops.h b/drivers/gpu/arm_gpu/mali_kbase_gpuprops.h
new file mode 100644
index 000000000000..57b3eaf9cd53
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpuprops.h
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_H_
+#define _KBASE_GPUPROPS_H_
+
+#include "mali_kbase_gpuprops_types.h"
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/**
+ * @brief Set up Kbase GPU properties.
+ *
+ * Set up Kbase GPU properties with information from the GPU registers
+ *
+ * @param kbdev The struct kbase_device structure for the device
+ */
+void kbase_gpuprops_set(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_set_features - Set up Kbase GPU properties
+ * @kbdev: Device pointer
+ *
+ * This function sets up GPU properties that are dependent on the hardware
+ * features bitmask. This function must be preceded by a call to
+ * kbase_hw_set_features_mask().
+ */
+void kbase_gpuprops_set_features(struct kbase_device *kbdev);
+
+/**
+ * @brief Provide GPU properties to the user side through a UKU call.
+ *
+ * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers.
+ *
+ * @param kctx The struct kbase_context structure
+ * @param kbase_props A copy of the struct kbase_uk_gpuprops structure from userspace
+ *
+ * @return 0 on success. Any other value indicates failure.
+ */
+int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props);
+
+/**
+ * kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
+ * @kbdev: The kbase device
+ *
+ * Fills kbdev->gpu_props->prop_buffer with the GPU properties for user
+ * space to read.
+ */
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_update_core_props_gpu_id - break down gpu id value
+ * @gpu_props: the &base_gpu_props structure
+ *
+ * Break down gpu_id value stored in base_gpu_props::raw_props.gpu_id into
+ * separate fields (version_status, minor_revision, major_revision, product_id)
+ * stored in base_gpu_props::core_props.
+ */
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props);
+
+
+#endif /* _KBASE_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_gpuprops_types.h b/drivers/gpu/arm_gpu/mali_kbase_gpuprops_types.h
new file mode 100644
index 000000000000..10794fc27318
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_gpuprops_types.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpuprops_types.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_TYPES_H_
+#define _KBASE_GPUPROPS_TYPES_H_
+
+#include "mali_base_kernel.h"
+
+#define KBASE_GPU_SPEED_MHZ 123
+#define KBASE_GPU_PC_SIZE_LOG2 24U
+
+struct kbase_gpuprops_regdump {
+ u32 gpu_id;
+ u32 l2_features;
+ u32 suspend_size; /* API 8.2+ */
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 as_present;
+ u32 js_present;
+ u32 thread_max_threads;
+ u32 thread_max_workgroup_size;
+ u32 thread_max_barrier_size;
+ u32 thread_features;
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+ u32 js_features[GPU_MAX_JOB_SLOTS];
+ u32 shader_present_lo;
+ u32 shader_present_hi;
+ u32 tiler_present_lo;
+ u32 tiler_present_hi;
+ u32 l2_present_lo;
+ u32 l2_present_hi;
+ u32 stack_present_lo;
+ u32 stack_present_hi;
+ u32 coherency_features;
+};
+
+struct kbase_gpu_cache_props {
+ u8 associativity;
+ u8 external_bus_width;
+};
+
+struct kbase_gpu_mem_props {
+ u8 core_group;
+};
+
+struct kbase_gpu_mmu_props {
+ u8 va_bits;
+ u8 pa_bits;
+};
+
+struct kbase_gpu_props {
+ /* kernel-only properties */
+ u8 num_cores;
+ u8 num_core_groups;
+ u8 num_address_spaces;
+ u8 num_job_slots;
+
+ struct kbase_gpu_cache_props l2_props;
+
+ struct kbase_gpu_mem_props mem;
+ struct kbase_gpu_mmu_props mmu;
+
+ /* Properties shared with userspace */
+ base_gpu_props props;
+
+ u32 prop_buffer_size;
+ void *prop_buffer;
+};
+
+#endif /* _KBASE_GPUPROPS_TYPES_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hw.c b/drivers/gpu/arm_gpu/mali_kbase_hw.c
new file mode 100644
index 000000000000..bacb32ff661a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hw.c
@@ -0,0 +1,492 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Run-time work-arounds helpers
+ */
+
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_midg_regmap.h>
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+
+void kbase_hw_set_features_mask(struct kbase_device *kbdev)
+{
+ const enum base_hw_feature *features;
+ u32 gpu_id;
+ u32 product_id;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
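+ /* New-format GPU IDs select the feature list by product model; legacy
+ * IDs select it by product ID.
+ */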
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ features = base_hw_features_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ features = base_hw_features_tHEx;
+ break;
+ case GPU_ID2_PRODUCT_TSIX:
+ features = base_hw_features_tSIx;
+ break;
+#ifdef MALI_INCLUDE_TDVX
+ case GPU_ID2_PRODUCT_TDVX:
+ features = base_hw_features_tDVx;
+ break;
+#endif /* MALI_INCLUDE_TDVX */
+#ifdef MALI_INCLUDE_TGOX
+ case GPU_ID2_PRODUCT_TGOX:
+ features = base_hw_features_tGOx;
+ break;
+#endif /* MALI_INCLUDE_TGOX */
+#ifdef MALI_INCLUDE_TKAX
+ case GPU_ID2_PRODUCT_TKAX:
+ features = base_hw_features_tKAx;
+ break;
+#endif /* MALI_INCLUDE_TKAX */
+#ifdef MALI_INCLUDE_TTRX
+ case GPU_ID2_PRODUCT_TTRX:
+ features = base_hw_features_tTRx;
+ break;
+#endif /* MALI_INCLUDE_TTRX */
+ default:
+ features = base_hw_features_generic;
+ break;
+ }
+ } else {
+ switch (product_id) {
+ case GPU_ID_PI_TFRX:
+ /* FALLTHROUGH */
+ case GPU_ID_PI_T86X:
+ features = base_hw_features_tFxx;
+ break;
+ case GPU_ID_PI_T83X:
+ features = base_hw_features_t83x;
+ break;
+ case GPU_ID_PI_T82X:
+ features = base_hw_features_t82x;
+ break;
+ case GPU_ID_PI_T76X:
+ features = base_hw_features_t76x;
+ break;
+ case GPU_ID_PI_T72X:
+ features = base_hw_features_t72x;
+ break;
+ case GPU_ID_PI_T62X:
+ features = base_hw_features_t62x;
+ break;
+ case GPU_ID_PI_T60X:
+ features = base_hw_features_t60x;
+ break;
+ default:
+ features = base_hw_features_generic;
+ break;
+ }
+ }
+
+ for (; *features != BASE_HW_FEATURE_END; features++)
+ set_bit(*features, &kbdev->hw_features_mask[0]);
+}
+
+/**
+ * kbase_hw_get_issues_for_new_id - Get the hardware issues for a new GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: pointer to an array of hardware issues, terminated by
+ * BASE_HW_ISSUE_END.
+ *
+ * This function can only be used on new-format GPU IDs, i.e. those for which
+ * GPU_ID_IS_NEW_FORMAT evaluates as true. The GPU ID is read from the @kbdev.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU will
+ * be treated as the most recent known version not later than the actual
+ * version. In such circumstances, the GPU ID in @kbdev will also be replaced
+ * with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by kbase_gpuprops_get_props()
+ * before calling this function.
+ */
+static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
+ struct kbase_device *kbdev)
+{
+ const enum base_hw_issue *issues = NULL;
+
+ struct base_hw_product {
+ u32 product_model;
+ struct {
+ u32 version;
+ const enum base_hw_issue *issues;
+ } map[7];
+ };
+
+ static const struct base_hw_product base_hw_products[] = {
+ {GPU_ID2_PRODUCT_TMIX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 1),
+ base_hw_issues_tMIx_r0p0_05dev0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 2), base_hw_issues_tMIx_r0p0},
+ {U32_MAX /* sentinel value */, NULL} } },
+
+ {GPU_ID2_PRODUCT_THEX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tHEx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tHEx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tHEx_r0p1},
+ {U32_MAX, NULL} } },
+
+ {GPU_ID2_PRODUCT_TSIX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tSIx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tSIx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tSIx_r0p1},
+ {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tSIx_r1p0},
+ {U32_MAX, NULL} } },
+
+#ifdef MALI_INCLUDE_TDVX
+ {GPU_ID2_PRODUCT_TDVX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDVx_r0p0},
+ {U32_MAX, NULL} } },
+#endif /* MALI_INCLUDE_TDVX */
+
+
+#ifdef MALI_INCLUDE_TGOX
+ {GPU_ID2_PRODUCT_TGOX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0},
+ {U32_MAX, NULL} } },
+#endif /* MALI_INCLUDE_TGOX */
+
+#ifdef MALI_INCLUDE_TKAX
+ {GPU_ID2_PRODUCT_TKAX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tKAx_r0p0},
+ {U32_MAX, NULL} } },
+#endif /* MALI_INCLUDE_TKAX */
+
+#ifdef MALI_INCLUDE_TTRX
+ {GPU_ID2_PRODUCT_TTRX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
+ {U32_MAX, NULL} } },
+#endif /* MALI_INCLUDE_TTRX */
+ };
+
+ u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ const u32 product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+ const struct base_hw_product *product = NULL;
+ size_t p;
+
+ /* Stop when we reach the end of the products array. */
+ for (p = 0; p < ARRAY_SIZE(base_hw_products); ++p) {
+ if (product_model == base_hw_products[p].product_model) {
+ product = &base_hw_products[p];
+ break;
+ }
+ }
+
+ if (product != NULL) {
+ /* Found a matching product. */
+ const u32 version = gpu_id & GPU_ID2_VERSION;
+#if !MALI_CUSTOMER_RELEASE
+ u32 fallback_version = 0;
+ const enum base_hw_issue *fallback_issues = NULL;
+#endif
+ size_t v;
+
+ /* Stop when we reach the end of the map. */
+ for (v = 0; product->map[v].version != U32_MAX; ++v) {
+
+ if (version == product->map[v].version) {
+ /* Exact match so stop. */
+ issues = product->map[v].issues;
+ break;
+ }
+
+#if !MALI_CUSTOMER_RELEASE
+ /* Check whether this is a candidate for most recent
+ known version not later than the actual
+ version. */
+ if ((version > product->map[v].version) &&
+ (product->map[v].version >= fallback_version)) {
+ fallback_version = product->map[v].version;
+ fallback_issues = product->map[v].issues;
+ }
+#endif
+ }
+
+#if !MALI_CUSTOMER_RELEASE
+ if ((issues == NULL) && (fallback_issues != NULL)) {
+ /* Fall back to the issue set of the most recent known
+ version not later than the actual version. */
+ issues = fallback_issues;
+
+ dev_info(kbdev->dev,
+ "r%dp%d status %d is unknown; treating as r%dp%d status %d",
+ (gpu_id & GPU_ID2_VERSION_MAJOR) >>
+ GPU_ID2_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_MINOR) >>
+ GPU_ID2_VERSION_MINOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_STATUS) >>
+ GPU_ID2_VERSION_STATUS_SHIFT,
+ (fallback_version & GPU_ID2_VERSION_MAJOR) >>
+ GPU_ID2_VERSION_MAJOR_SHIFT,
+ (fallback_version & GPU_ID2_VERSION_MINOR) >>
+ GPU_ID2_VERSION_MINOR_SHIFT,
+ (fallback_version & GPU_ID2_VERSION_STATUS) >>
+ GPU_ID2_VERSION_STATUS_SHIFT);
+
+ gpu_id &= ~GPU_ID2_VERSION;
+ gpu_id |= fallback_version;
+ kbdev->gpu_props.props.raw_props.gpu_id = gpu_id;
+
+ kbase_gpuprops_update_core_props_gpu_id(
+ &kbdev->gpu_props.props);
+ }
+#endif
+ }
+ return issues;
+}
+
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
+{
+ const enum base_hw_issue *issues;
+ u32 gpu_id;
+ u32 product_id;
+ u32 impl_tech;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ impl_tech = kbdev->gpu_props.props.thread_props.impl_tech;
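+ /* Hardware implementations use the per-revision issue lists; the
+ * software model (impl_tech == IMPLEMENTATION_MODEL) uses the
+ * model-specific lists in the else branch below.
+ */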
+
+ if (impl_tech != IMPLEMENTATION_MODEL) {
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ issues = kbase_hw_get_issues_for_new_id(kbdev);
+ if (issues == NULL) {
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+
+#if !MALI_CUSTOMER_RELEASE
+ /* The GPU ID might have been replaced with the last
+ known version of the same GPU. */
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+#endif
+
+ } else {
+ switch (gpu_id) {
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
+ issues = base_hw_issues_t60x_r0p0_15dev0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
+ issues = base_hw_issues_t60x_r0p0_eac;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
+ issues = base_hw_issues_t60x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
+ issues = base_hw_issues_t62x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1):
+ issues = base_hw_issues_t62x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
+ issues = base_hw_issues_t62x_r1p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
+ issues = base_hw_issues_t76x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1):
+ issues = base_hw_issues_t76x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
+ issues = base_hw_issues_t76x_r0p1_50rel0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
+ issues = base_hw_issues_t76x_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
+ issues = base_hw_issues_t76x_r0p3;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
+ issues = base_hw_issues_t76x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2):
+ issues = base_hw_issues_t72x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
+ issues = base_hw_issues_t72x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0):
+ issues = base_hw_issues_t72x_r1p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2):
+ issues = base_hw_issues_tFRx_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
+ issues = base_hw_issues_tFRx_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8):
+ issues = base_hw_issues_tFRx_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0):
+ issues = base_hw_issues_tFRx_r2p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0):
+ issues = base_hw_issues_t86x_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8):
+ issues = base_hw_issues_t86x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0):
+ issues = base_hw_issues_t86x_r2p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0):
+ issues = base_hw_issues_t83x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8):
+ issues = base_hw_issues_t83x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0):
+ issues = base_hw_issues_t82x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0):
+ issues = base_hw_issues_t82x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8):
+ issues = base_hw_issues_t82x_r1p0;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* Software model */
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ issues = base_hw_issues_model_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ issues = base_hw_issues_model_tHEx;
+ break;
+ case GPU_ID2_PRODUCT_TSIX:
+ issues = base_hw_issues_model_tSIx;
+ break;
+#ifdef MALI_INCLUDE_TDVX
+ case GPU_ID2_PRODUCT_TDVX:
+ issues = base_hw_issues_model_tDVx;
+ break;
+#endif /* MALI_INCLUDE_TDVX */
+#ifdef MALI_INCLUDE_TGOX
+ case GPU_ID2_PRODUCT_TGOX:
+ issues = base_hw_issues_model_tGOx;
+ break;
+#endif /* MALI_INCLUDE_TGOX */
+#ifdef MALI_INCLUDE_TKAX
+ case GPU_ID2_PRODUCT_TKAX:
+ issues = base_hw_issues_model_tKAx;
+ break;
+#endif /* MALI_INCLUDE_TKAX */
+#ifdef MALI_INCLUDE_TTRX
+ case GPU_ID2_PRODUCT_TTRX:
+ issues = base_hw_issues_model_tTRx;
+ break;
+#endif /* MALI_INCLUDE_TTRX */
+ default:
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+ } else {
+ switch (product_id) {
+ case GPU_ID_PI_T60X:
+ issues = base_hw_issues_model_t60x;
+ break;
+ case GPU_ID_PI_T62X:
+ issues = base_hw_issues_model_t62x;
+ break;
+ case GPU_ID_PI_T72X:
+ issues = base_hw_issues_model_t72x;
+ break;
+ case GPU_ID_PI_T76X:
+ issues = base_hw_issues_model_t76x;
+ break;
+ case GPU_ID_PI_TFRX:
+ issues = base_hw_issues_model_tFRx;
+ break;
+ case GPU_ID_PI_T86X:
+ issues = base_hw_issues_model_t86x;
+ break;
+ case GPU_ID_PI_T83X:
+ issues = base_hw_issues_model_t83x;
+ break;
+ case GPU_ID_PI_T82X:
+ issues = base_hw_issues_model_t82x;
+ break;
+ default:
+ dev_err(kbdev->dev, "Unknown GPU ID %x",
+ gpu_id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ dev_info(kbdev->dev,
+ "GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d",
+ (gpu_id & GPU_ID2_PRODUCT_MAJOR) >>
+ GPU_ID2_PRODUCT_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_MAJOR) >>
+ GPU_ID2_ARCH_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_MINOR) >>
+ GPU_ID2_ARCH_MINOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_REV) >>
+ GPU_ID2_ARCH_REV_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_MAJOR) >>
+ GPU_ID2_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_MINOR) >>
+ GPU_ID2_VERSION_MINOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_STATUS) >>
+ GPU_ID2_VERSION_STATUS_SHIFT);
+ } else {
+ dev_info(kbdev->dev,
+ "GPU identified as 0x%04x r%dp%d status %d",
+ (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ (gpu_id & GPU_ID_VERSION_MAJOR) >>
+ GPU_ID_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID_VERSION_MINOR) >>
+ GPU_ID_VERSION_MINOR_SHIFT,
+ (gpu_id & GPU_ID_VERSION_STATUS) >>
+ GPU_ID_VERSION_STATUS_SHIFT);
+ }
+
+ for (; *issues != BASE_HW_ISSUE_END; issues++)
+ set_bit(*issues, &kbdev->hw_issues_mask[0]);
+
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hw.h b/drivers/gpu/arm_gpu/mali_kbase_hw.h
new file mode 100644
index 000000000000..754250ce968d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hw.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file
+ * Run-time work-arounds helpers
+ */
+
+#ifndef _KBASE_HW_H_
+#define _KBASE_HW_H_
+
+#include "mali_kbase_defs.h"
+
+/**
+ * @brief Tell whether a work-around should be enabled
+ */
+#define kbase_hw_has_issue(kbdev, issue)\
+ test_bit(issue, &(kbdev)->hw_issues_mask[0])
+
+/**
+ * @brief Tell whether a feature is supported
+ */
+#define kbase_hw_has_feature(kbdev, feature)\
+ test_bit(feature, &(kbdev)->hw_features_mask[0])
+
+/**
+ * kbase_hw_set_issues_mask - Set the hardware issues mask based on the GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: 0 if the GPU ID was recognized, otherwise -EINVAL.
+ *
+ * The GPU ID is read from the @kbdev.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU with a
+ * new-format ID will be treated as the most recent known version not later
+ * than the actual version. In such circumstances, the GPU ID in @kbdev will
+ * also be replaced with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by
+ * kbase_gpuprops_get_props() before calling this function.
+ */
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev);
+
+/**
+ * @brief Set the features mask depending on the GPU ID
+ */
+void kbase_hw_set_features_mask(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HW_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_backend.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_backend.h
new file mode 100644
index 000000000000..b09be99e6b4e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_backend.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access backend common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_BACKEND_H_
+#define _KBASE_HWACCESS_BACKEND_H_
+
+/**
+ * kbase_backend_early_init - Perform any backend-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_early_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_init - Perform any backend-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_late_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_early_term - Perform any backend-specific termination.
+ * @kbdev: Device pointer
+ */
+void kbase_backend_early_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_term - Perform any backend-specific termination.
+ * @kbdev: Device pointer
+ */
+void kbase_backend_late_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_BACKEND_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_defs.h
new file mode 100644
index 000000000000..0acf297192fd
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_defs.h
@@ -0,0 +1,36 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_defs.h
+ * HW access common definitions
+ */
+
+#ifndef _KBASE_HWACCESS_DEFS_H_
+#define _KBASE_HWACCESS_DEFS_H_
+
+#include <mali_kbase_jm_defs.h>
+
+/* The hwaccess_lock (a spinlock) must be held when accessing this structure */
+struct kbase_hwaccess_data {
+ struct kbase_context *active_kctx;
+
+ struct kbase_backend_data backend;
+};
+
+#endif /* _KBASE_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_gpuprops.h
new file mode 100644
index 000000000000..cf8a8131c22e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_gpuprops.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * Base kernel property query backend APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPUPROPS_H_
+#define _KBASE_HWACCESS_GPUPROPS_H_
+
+/**
+ * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from
+ * GPU
+ * @kbdev: Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ */
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read from GPU
+ * @kbdev: Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads GPU properties that are dependent on the hardware
+ * features bitmask
+ */
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump);
+
+
+#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_instr.h
new file mode 100644
index 000000000000..5de2b7535bb4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_instr.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * HW Access instrumentation common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_INSTR_H_
+#define _KBASE_HWACCESS_INSTR_H_
+
+#include <mali_kbase_instr_defs.h>
+
+/**
+ * kbase_instr_hwcnt_enable_internal - Enable HW counters collection
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @setup: HW counter setup parameters
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_uk_hwcnt_setup *setup);
+
+/**
+ * kbase_instr_hwcnt_disable_internal - Disable HW counters collection
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for an ongoing dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU
+ * @kctx: Kbase context
+ *
+ * Caller must either wait for kbase_instr_hwcnt_dump_complete() to return true,
+ * or call kbase_instr_hwcnt_wait_for_dump().
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has
+ * completed.
+ * @kctx: Kbase context
+ *
+ * Context: will sleep, waiting for dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has
+ * completed
+ * @kctx: Kbase context
+ * @success: Set to true if successful
+ *
+ * Context: does not sleep.
+ *
+ * Return: true if the dump is complete
+ */
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+ bool * const success);
+
+/**
+ * kbase_instr_hwcnt_clear() - Clear HW counters
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_backend_init() - Initialise the instrumentation backend
+ * @kbdev: Kbase device
+ *
+ * This function should be called during driver initialization.
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_backend_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_backend_term() - Terminate the instrumentation backend
+ * @kbdev: Kbase device
+ *
+ * This function should be called during driver termination.
+ */
+void kbase_instr_backend_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_INSTR_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_jm.h
new file mode 100644
index 000000000000..750fda2cd81d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_jm.h
@@ -0,0 +1,381 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_JM_H_
+#define _KBASE_HWACCESS_JM_H_
+
+/**
+ * kbase_backend_run_atom() - Run an atom on the GPU
+ * @kbdev: Device pointer
+ * @katom: Atom to run
+ *
+ * Caller must hold the HW access lock
+ */
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_slot_update - Update state based on slot ringbuffers
+ *
+ * @kbdev: Device pointer
+ *
+ * Inspect the jobs in the slot ringbuffers and update state.
+ *
+ * This will cause jobs to be submitted to hardware if they are unblocked
+ */
+void kbase_backend_slot_update(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_find_and_release_free_address_space() - Release a free AS
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * This function can evict an idle context from the runpool, freeing up the
+ * address space it was using.
+ *
+ * The address space is marked as in use. The caller must either assign a
+ * context using kbase_gpu_use_ctx(), or release it using
+ * kbase_ctx_sched_release()
+ *
+ * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
+ * is available
+ */
+int kbase_backend_find_and_release_free_address_space(
+ struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
+ * provided address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer. May be NULL
+ * @as_nr: Free address space to use
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * Return: true if successful, false if ASID not assigned.
+ */
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int as_nr);
+
+/**
+ * kbase_backend_use_ctx_sched() - Activate a context.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * The context must already be scheduled and assigned to an address space. If
+ * the context is not scheduled, then kbase_gpu_use_ctx() should be used
+ * instead.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if context is now active, false otherwise (i.e. if the context does
+ * not have an address space assigned)
+ */
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
+ * de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
+ */
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
+ * de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex
+ *
+ * This function must perform any operations that could not be performed in IRQ
+ * context by kbase_backend_release_ctx_irq().
+ */
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_cacheclean - Perform a cache clean if the given atom requires
+ * one
+ * @kbdev: Device pointer
+ * @katom: Pointer to the failed atom
+ *
+ * On some GPUs, the GPU cache must be cleaned following a failed atom. This
+ * function performs a clean if it is required by @katom.
+ */
+void kbase_backend_cacheclean(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+
+/**
+ * kbase_backend_complete_wq() - Perform backend-specific actions required on
+ * completing an atom.
+ * @kbdev: Device pointer
+ * @katom: Pointer to the atom to complete
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
+ * required on completing an atom, after
+ * any scheduling has taken place.
+ * @kbdev: Device pointer
+ * @core_req: Core requirements of atom
+ * @affinity: Affinity of atom
+ * @coreref_state: Coreref state of atom
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state);
+
+/**
+ * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
+ * and remove any others from the ringbuffers.
+ * @kbdev: Device pointer
+ * @end_timestamp: Timestamp of reset
+ */
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
+
+/**
+ * kbase_backend_inspect_head() - Return the atom currently at the head of slot
+ * @js
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Atom currently at the head of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
+ int js);
+
+/**
+ * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
+ * @js
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Atom currently at the tail of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+ int js);
+
+/**
+ * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
+ * slot.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of atoms currently on slot
+ */
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
+ * that are currently on the GPU.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of atoms on slot @js that are currently on the GPU.
+ */
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
+ * has changed.
+ * @kbdev: Device pointer
+ *
+ * Perform any required backend-specific actions (e.g. starting/stopping
+ * scheduling timers).
+ */
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
+ * @kbdev: Device pointer
+ *
+ * Perform any required backend-specific actions (e.g. updating timeouts of
+ * currently running atoms).
+ */
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_slot_free() - Return the number of jobs that can be currently
+ * submitted to slot @js.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of jobs that can be submitted.
+ */
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
+ * running from a context
+ * @kctx: Context pointer
+ *
+ * This is used in response to a page fault to remove all jobs from the faulting
+ * context from the hardware.
+ */
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);
+
+/**
+ * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
+ * to be descheduled.
+ * @kctx: Context pointer
+ *
+ * This should be called following kbase_js_zap_context(), to ensure the context
+ * can be safely destroyed.
+ */
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
+
+/**
+ * kbase_backend_get_current_flush_id - Return the current flush ID
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: the current flush ID to be recorded for each job chain
+ */
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);
+
+#if KBASE_GPU_RESET_EN
+/**
+ * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it returns
+ * true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_locked - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_silent - Reset the GPU silently
+ * @kbdev: Device pointer
+ *
+ * Reset the GPU without trying to cancel jobs and don't emit messages into
+ * the kernel log while doing the reset.
+ *
+ * This function should be used in cases where we are doing a controlled reset
+ * of the GPU as part of normal processing (e.g. exiting protected mode) where
+ * the driver will have ensured the scheduler has been idled and all other
+ * users of the GPU (e.g. instrumentation) have been suspended.
+ */
+void kbase_reset_gpu_silent(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_active - Reports if the GPU is being reset
+ * @kbdev: Device pointer
+ *
+ * Return: True if the GPU is in the process of being reset.
+ */
+bool kbase_reset_gpu_active(struct kbase_device *kbdev);
+#endif
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx: The kbase context that contains the job(s) that should
+ * be hard-stopped
+ * @js: The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ * jobs from the context)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom);
+
+extern struct protected_mode_ops kbase_native_protected_ops;
+
+#endif /* _KBASE_HWACCESS_JM_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_pm.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_pm.h
new file mode 100644
index 000000000000..71c7d495c40a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_pm.h
@@ -0,0 +1,209 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_pm.h
+ * HW access power manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_PM_H_
+#define _KBASE_HWACCESS_PM_H_
+
+#include <mali_midg_regmap.h>
+#include <linux/atomic.h>
+
+#include <mali_kbase_pm_defs.h>
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/* Functions common to all HW access backends */
+
+/**
+ * Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return 0 if the power management framework was successfully
+ * initialized.
+ */
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev);
+
+/**
+ * Terminate the power management framework.
+ *
+ * No power management functions may be called after this (except
+ * @ref kbase_pm_init)
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_hwaccess_pm_powerup - Power up the GPU.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags to pass on to kbase_pm_init_hw
+ *
+ * Power up GPU after all modules have been initialized and interrupt handlers
+ * installed.
+ *
+ * Return: 0 if powerup was successful.
+ */
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ *
+ * Should ensure that no new interrupts are generated, but allow any currently
+ * running interrupt handlers to complete successfully. The GPU is forced off by
+ * the time this function returns, regardless of whether or not the active power
+ * policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to suspend the GPU
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to resume the GPU from a suspend
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for activating the GPU. Called when the first
+ * context goes active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for idling the GPU. Called when the last
+ * context goes idle.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev);
+
+
+/**
+ * Set the debug core mask.
+ *
+ * This determines which cores the power manager is allowed to use.
+ *
+ * @param kbdev The kbase device structure for the device (must be a
+ * valid pointer)
+ * @param new_core_mask_js0 The core mask to use for job slot 0
+ * @param new_core_mask_js1 The core mask to use for job slot 1
+ * @param new_core_mask_js2 The core mask to use for job slot 2
+ */
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ u64 new_core_mask_js0, u64 new_core_mask_js1,
+ u64 new_core_mask_js2);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ * @ref kbase_pm_ca_list_policies)
+ */
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_ca_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ * be NULL. The contents of this array must not be
+ * modified.
+ *
+ * @return The number of policies
+ */
+int
+kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ * @ref kbase_pm_list_policies)
+ */
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ * be NULL. The contents of this array must not be
+ * modified.
+ *
+ * @return The number of policies
+ */
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies);
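+
+/*
+ * Illustrative sketch (not part of this patch): selecting a policy by name.
+ * The "name" field and the "coarse_demand" policy name are assumptions about
+ * struct kbase_pm_policy, not defined in this header:
+ *
+ *     const struct kbase_pm_policy *const *policies;
+ *     int i, n = kbase_pm_list_policies(&policies);
+ *
+ *     for (i = 0; i < n; i++)
+ *         if (!strcmp(policies[i]->name, "coarse_demand"))
+ *             kbase_pm_set_policy(kbdev, policies[i]);
+ */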
+
+#endif /* _KBASE_HWACCESS_PM_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwaccess_time.h b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_time.h
new file mode 100644
index 000000000000..89d26eaf09a4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwaccess_time.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_time.h
+ * HW access backend time APIs
+ */
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev: Device pointer
+ * @cycle_counter: Pointer to u64 to store cycle counter in
+ * @system_time: Pointer to u64 to store system time in
+ * @ts: Pointer to struct timespec to store current monotonic
+ * time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts);
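+
+/*
+ * Illustrative sketch (not part of this patch): pairing the GPU cycle counter
+ * with system and monotonic time, e.g. for timeline correlation:
+ *
+ *     u64 cycles, sys_time;
+ *     struct timespec ts;
+ *
+ *     kbase_backend_get_gpu_time(kbdev, &cycles, &sys_time, &ts);
+ */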
+
+/**
+ * kbase_wait_write_flush() - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs then the counters are reset to zero and the delay may
+ * not be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367.
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_context *kctx);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_hwcnt_reader.h b/drivers/gpu/arm_gpu/mali_kbase_hwcnt_reader.h
new file mode 100644
index 000000000000..cf7bf1b35dc5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_hwcnt_reader.h
@@ -0,0 +1,66 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_HWCNT_READER_H_
+#define _KBASE_HWCNT_READER_H_
+
+/* The ids of ioctl commands. */
+#define KBASE_HWCNT_READER 0xBE
+#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32)
+#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, u32)
+#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER _IOR(KBASE_HWCNT_READER, 0x20,\
+ struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_PUT_BUFFER _IOW(KBASE_HWCNT_READER, 0x21,\
+ struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, u32)
+#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, u32)
+#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, u32)
+#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32)
+
+/**
+ * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata
+ * @timestamp: time when sample was collected
+ * @event_id: id of an event that triggered sample collection
+ * @buffer_idx: position in sampling area where sample buffer was stored
+ */
+struct kbase_hwcnt_reader_metadata {
+ u64 timestamp;
+ u32 event_id;
+ u32 buffer_idx;
+};
+
+/**
+ * enum base_hwcnt_reader_event - hwcnt dumping events
+ * @BASE_HWCNT_READER_EVENT_MANUAL: manual request for dump
+ * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump
+ * @BASE_HWCNT_READER_EVENT_PREJOB: prejob dump request
+ * @BASE_HWCNT_READER_EVENT_POSTJOB: postjob dump request
+ * @BASE_HWCNT_READER_EVENT_COUNT: number of supported events
+ */
+enum base_hwcnt_reader_event {
+ BASE_HWCNT_READER_EVENT_MANUAL,
+ BASE_HWCNT_READER_EVENT_PERIODIC,
+ BASE_HWCNT_READER_EVENT_PREJOB,
+ BASE_HWCNT_READER_EVENT_POSTJOB,
+
+ BASE_HWCNT_READER_EVENT_COUNT
+};
+
+#endif /* _KBASE_HWCNT_READER_H_ */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_ioctl.h b/drivers/gpu/arm_gpu/mali_kbase_ioctl.h
new file mode 100644
index 000000000000..3957cd171999
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_ioctl.h
@@ -0,0 +1,658 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_IOCTL_H_
+#define _KBASE_IOCTL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/types.h>
+
+#define KBASE_IOCTL_TYPE 0x80
+
+#ifdef ANDROID
+/* Android's definition of ioctl is incorrect, specifying the type argument as
+ * 'int'. This creates a warning when using _IOWR (as the top bit is set). Work
+ * around this by redefining _IOC to include a cast to 'int'.
+ */
+#undef _IOC
+#define _IOC(dir, type, nr, size) \
+ ((int)(((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT)))
+#endif
+
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility with kernel
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+ __u16 major;
+ __u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
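+
+/*
+ * Illustrative sketch (not part of this patch): a userspace client would
+ * typically begin with this handshake. The device node name (e.g. /dev/mali0)
+ * and the version numbers exchanged are assumptions, not part of this header:
+ *
+ *     struct kbase_ioctl_version_check vc = { .major = 0, .minor = 0 };
+ *
+ *     if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc) == 0)
+ *         printf("kernel interface %u.%u\n", vc.major, vc.minor);
+ */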
+
+/**
+ * struct kbase_ioctl_set_flags - Set kernel context creation flags
+ *
+ * @create_flags: Flags - see base_context_create_flags
+ */
+struct kbase_ioctl_set_flags {
+ __u32 create_flags;
+};
+
+#define KBASE_IOCTL_SET_FLAGS \
+ _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)
+
+/**
+ * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
+ *
+ * @addr: Memory address of an array of struct base_jd_atom_v2
+ * @nr_atoms: Number of entries in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ */
+struct kbase_ioctl_job_submit {
+ __u64 addr;
+ __u32 nr_atoms;
+ __u32 stride;
+};
+
+#define KBASE_IOCTL_JOB_SUBMIT \
+ _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
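+
+/*
+ * Illustrative sketch (not part of this patch): submitting a batch of atoms,
+ * assuming an array of struct base_jd_atom_v2 (defined in the base UAPI
+ * headers) has already been populated:
+ *
+ *     struct kbase_ioctl_job_submit submit = {
+ *         .addr = (__u64)(uintptr_t)atoms,
+ *         .nr_atoms = n,
+ *         .stride = sizeof(struct base_jd_atom_v2),
+ *     };
+ *
+ *     ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit);
+ */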
+
+/**
+ * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
+ *
+ * @buffer: Pointer to the buffer to store properties into
+ * @size: Size of the buffer
+ * @flags: Flags - must be zero for now
+ *
+ * The ioctl will return the number of bytes stored into @buffer or an error
+ * on failure (e.g. @size is too small). If @size is specified as 0 then no
+ * data will be written but the return value will be the number of bytes needed
+ * for all the properties.
+ *
+ * @flags may be used in the future to request a different format for the
+ * buffer. With @flags == 0 the following format is used.
+ *
+ * The buffer will be filled with pairs of values, a u32 key identifying the
+ * property followed by the value. The size of the value is identified using
+ * the bottom bits of the key. The value then immediately follows the key and
+ * is tightly packed (there is no padding). All keys and values are
+ * little-endian.
+ *
+ * 00 = u8
+ * 01 = u16
+ * 10 = u32
+ * 11 = u64
+ */
+struct kbase_ioctl_get_gpuprops {
+ __u64 buffer;
+ __u32 size;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_GET_GPUPROPS \
+ _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)
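+
+/*
+ * Illustrative sketch (not part of this patch): decoding the packed property
+ * buffer described above. It is assumed here that the property id occupies
+ * the bits above the two size bits of each key:
+ *
+ *     uint8_t *p = buf, *end = buf + len;
+ *     uint32_t key, prop_id;
+ *     size_t size;
+ *
+ *     while (p + 4 <= end) {
+ *         memcpy(&key, p, 4);
+ *         p += 4;
+ *         size = 1u << (key & 3);     (00=u8, 01=u16, 10=u32, 11=u64)
+ *         prop_id = key >> 2;         (value is the next 'size' bytes, LE)
+ *         p += size;
+ *     }
+ */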
+
+#define KBASE_IOCTL_POST_TERM \
+ _IO(KBASE_IOCTL_TYPE, 4)
+
+/**
+ * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
+ *
+ * @va_pages: The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate
+ * @extent: The number of extra pages to allocate on each GPU fault that grows
+ * the region
+ * @flags: Flags
+ * @gpu_va: The GPU virtual address which is allocated
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alloc {
+ struct {
+ __u64 va_pages;
+ __u64 commit_pages;
+ __u64 extent;
+ __u64 flags;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALLOC \
+ _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)
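+
+/*
+ * Illustrative sketch (not part of this patch): allocating GPU memory through
+ * the union above. The BASE_MEM_PROT_* flags are assumed to come from the
+ * base UAPI headers:
+ *
+ *     union kbase_ioctl_mem_alloc alloc = { 0 };
+ *
+ *     alloc.in.va_pages = 16;
+ *     alloc.in.commit_pages = 16;
+ *     alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD;
+ *
+ *     if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, &alloc) == 0)
+ *         use(alloc.out.gpu_va);
+ */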
+
+/**
+ * union kbase_ioctl_mem_query - Query properties of a GPU memory region
+ * @gpu_addr: A GPU address contained within the region
+ * @query: The type of query
+ * @value: The result of the query
+ *
+ * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_query {
+ struct {
+ __u64 gpu_addr;
+ __u64 query;
+ } in;
+ struct {
+ __u64 value;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_QUERY \
+ _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
+
+#define KBASE_MEM_QUERY_COMMIT_SIZE 1
+#define KBASE_MEM_QUERY_VA_SIZE 2
+#define KBASE_MEM_QUERY_FLAGS 3
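+
+/*
+ * Illustrative sketch (not part of this patch): querying the committed size
+ * of a previously allocated region using the selectors above:
+ *
+ *     union kbase_ioctl_mem_query q = { 0 };
+ *
+ *     q.in.gpu_addr = gpu_va;
+ *     q.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ *
+ *     if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, &q) == 0)
+ *         committed_pages = q.out.value;
+ */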
+
+/**
+ * struct kbase_ioctl_mem_free - Free a memory region
+ * @gpu_addr: Handle to the region to free
+ */
+struct kbase_ioctl_mem_free {
+ __u64 gpu_addr;
+};
+
+#define KBASE_IOCTL_MEM_FREE \
+ _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
+/**
+ * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
+ * @buffer_count: requested number of dumping buffers
+ * @jm_bm: counters selection bitmask (JM)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ *
+ * A file descriptor is returned by the ioctl on success, or a negative value on error
+ */
+struct kbase_ioctl_hwcnt_reader_setup {
+ __u32 buffer_count;
+ __u32 jm_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
+ _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
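+
+/*
+ * Illustrative sketch (not part of this patch): the returned fd is driven with
+ * the KBASE_HWCNT_READER_* ioctls from mali_kbase_hwcnt_reader.h. How the
+ * sample memory is mapped is an assumption about the reader interface:
+ *
+ *     struct kbase_ioctl_hwcnt_reader_setup setup = {
+ *         .buffer_count = 16,
+ *         .jm_bm = ~0u, .shader_bm = ~0u, .tiler_bm = ~0u, .mmu_l2_bm = ~0u,
+ *     };
+ *     int hwcnt_fd = ioctl(fd, KBASE_IOCTL_HWCNT_READER_SETUP, &setup);
+ *
+ *     if (hwcnt_fd >= 0) {
+ *         struct kbase_hwcnt_reader_metadata md;
+ *
+ *         ioctl(hwcnt_fd, KBASE_HWCNT_READER_DUMP, 0);
+ *         ioctl(hwcnt_fd, KBASE_HWCNT_READER_GET_BUFFER, &md);
+ *         ... read the sample at md.buffer_idx from the mapped area ...
+ *         ioctl(hwcnt_fd, KBASE_HWCNT_READER_PUT_BUFFER, &md);
+ *     }
+ */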
+
+/**
+ * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
+ * @dump_buffer: GPU address to write counters to
+ * @jm_bm: counters selection bitmask (JM)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ */
+struct kbase_ioctl_hwcnt_enable {
+ __u64 dump_buffer;
+ __u32 jm_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_ENABLE \
+ _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)
+
+#define KBASE_IOCTL_HWCNT_DUMP \
+ _IO(KBASE_IOCTL_TYPE, 10)
+
+#define KBASE_IOCTL_HWCNT_CLEAR \
+ _IO(KBASE_IOCTL_TYPE, 11)
+
+/**
+ * struct kbase_ioctl_disjoint_query - Query the disjoint counter
+ * @counter: A counter of disjoint events in the kernel
+ */
+struct kbase_ioctl_disjoint_query {
+ __u32 counter;
+};
+
+#define KBASE_IOCTL_DISJOINT_QUERY \
+ _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)
+
+/**
+ * struct kbase_ioctl_get_ddk_version - Query the kernel version
+ * @version_buffer: Buffer to receive the kernel version string
+ * @size: Size of the buffer
+ *
+ * The ioctl will return the number of bytes written into version_buffer
+ * (which includes a NULL byte) or a negative error code
+ */
+struct kbase_ioctl_get_ddk_version {
+ __u64 version_buffer;
+ __u32 size;
+};
+
+#define KBASE_IOCTL_GET_DDK_VERSION \
+ _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
+
+/**
+ * struct kbase_ioctl_mem_jit_init - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ */
+struct kbase_ioctl_mem_jit_init {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
+/**
+ * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
+ *
+ * @handle: GPU memory handle (GPU VA)
+ * @user_addr: The address where it is mapped in user space
+ * @size: The number of bytes to synchronise
+ * @type: The direction to synchronise: 0 is sync to memory (clean),
+ * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_mem_sync {
+ __u64 handle;
+ __u64 user_addr;
+ __u64 size;
+ __u8 type;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_MEM_SYNC \
+ _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)
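+
+/*
+ * Illustrative sketch (not part of this patch): cleaning CPU caches for a
+ * mapped region before the GPU reads it. BASE_SYNCSET_OP_MSYNC is assumed to
+ * be the "sync to memory" constant from the base UAPI headers:
+ *
+ *     struct kbase_ioctl_mem_sync sync = { 0 };
+ *
+ *     sync.handle = gpu_va;
+ *     sync.user_addr = (__u64)(uintptr_t)cpu_ptr;
+ *     sync.size = length;
+ *     sync.type = BASE_SYNCSET_OP_MSYNC;
+ *
+ *     ioctl(fd, KBASE_IOCTL_MEM_SYNC, &sync);
+ */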
+
+/**
+ * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
+ *
+ * @gpu_addr: The GPU address of the memory region
+ * @cpu_addr: The CPU address to locate
+ * @size: A size in bytes to validate is contained within the region
+ * @offset: The offset from the start of the memory region to @cpu_addr
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_find_cpu_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 cpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
+
+/**
+ * struct kbase_ioctl_get_context_id - Get the kernel context ID
+ *
+ * @id: The kernel context ID
+ */
+struct kbase_ioctl_get_context_id {
+ int id; /* This should really be __u32, but see GPUCORE-10048 */
+};
+
+#define KBASE_IOCTL_GET_CONTEXT_ID \
+ _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)
+
+/**
+ * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
+ *
+ * @flags: Flags
+ *
+ * The ioctl returns a file descriptor when successful
+ */
+struct kbase_ioctl_tlstream_acquire {
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
+ _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)
+
+#define KBASE_IOCTL_TLSTREAM_FLUSH \
+ _IO(KBASE_IOCTL_TYPE, 19)
+
+/**
+ * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
+ *
+ * @gpu_addr: The memory region to modify
+ * @pages: The number of physical pages that should be present
+ *
+ * The ioctl may return one of the following error codes or 0 for success:
+ * -ENOMEM: Out of memory
+ * -EINVAL: Invalid arguments
+ */
+struct kbase_ioctl_mem_commit {
+ __u64 gpu_addr;
+ __u64 pages;
+};
+
+#define KBASE_IOCTL_MEM_COMMIT \
+ _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)
+
+/**
+ * union kbase_ioctl_mem_alias - Create an alias of memory regions
+ * @flags: Flags, see BASE_MEM_xxx
+ * @stride: Bytes between start of each memory region
+ * @nents: The number of regions to pack together into the alias
+ * @aliasing_info: Pointer to an array of struct base_mem_aliasing_info
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alias {
+ struct {
+ __u64 flags;
+ __u64 stride;
+ __u64 nents;
+ __u64 aliasing_info;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+ _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @flags: Flags, see BASE_MEM_xxx
+ * @phandle: Handle to the external memory
+ * @type: Type of external memory, see base_mem_import_type
+ * @padding: Amount of extra VA pages to append to the imported buffer
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_import {
+ struct {
+ __u64 flags;
+ __u64 phandle;
+ __u32 type;
+ __u32 padding;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+ _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+ __u64 gpu_va;
+ __u64 flags;
+ __u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+ _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+ char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+ _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
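+
+/*
+ * Illustrative sketch (not part of this patch): creating a named stream; the
+ * zero-initialiser keeps the unused bytes of the fixed-size name NULL, as
+ * required above:
+ *
+ *     struct kbase_ioctl_stream_create stream = { 0 };
+ *
+ *     strncpy(stream.name, "my-timeline", sizeof(stream.name) - 1);
+ *     int stream_fd = ioctl(fd, KBASE_IOCTL_STREAM_CREATE, &stream);
+ */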
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+ int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+ _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_get_profiling_controls - Get the profiling controls
+ * @count: The size of @buffer in u32 words
+ * @buffer: The buffer to receive the profiling controls
+ */
+struct kbase_ioctl_get_profiling_controls {
+ __u64 buffer;
+ __u32 count;
+};
+
+#define KBASE_IOCTL_GET_PROFILING_CONTROLS \
+ _IOW(KBASE_IOCTL_TYPE, 26, struct kbase_ioctl_get_profiling_controls)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+ __u64 buffer;
+ __u32 len;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+ _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
+ * @event: GPU address of the event which has been updated
+ * @new_status: The new status to set
+ * @flags: Flags for future expansion
+ */
+struct kbase_ioctl_soft_event_update {
+ __u64 event;
+ __u32 new_status;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
+ _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)
+
+/* IOCTLs 29-32 are reserved */
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver; they may therefore change without notice
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+/**
+ * struct kbase_ioctl_tlstream_test - Start a timeline stream test
+ *
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay between tracepoints from one writer in milliseconds
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg: if non-zero aux messages will be included
+ */
+struct kbase_ioctl_tlstream_test {
+ __u32 tpw_count;
+ __u32 msg_delay;
+ __u32 msg_count;
+ __u32 aux_msg;
+};
+
+#define KBASE_IOCTL_TLSTREAM_TEST \
+ _IOW(KBASE_IOCTL_TEST_TYPE, 1, struct kbase_ioctl_tlstream_test)
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+ __u32 bytes_collected;
+ __u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+ _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+#endif
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3)
+
+#define KBASE_GPUPROP_PRODUCT_ID 1
+#define KBASE_GPUPROP_VERSION_STATUS 2
+#define KBASE_GPUPROP_MINOR_REVISION 3
+#define KBASE_GPUPROP_MAJOR_REVISION 4
+#define KBASE_GPUPROP_GPU_SPEED_MHZ 5
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MIN 7
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17
+
+#define KBASE_GPUPROP_MAX_THREADS 18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20
+#define KBASE_GPUPROP_MAX_REGISTERS 21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE 22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23
+#define KBASE_GPUPROP_IMPL_TECH 24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT 26
+#define KBASE_GPUPROP_RAW_L2_PRESENT 27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT 28
+#define KBASE_GPUPROP_RAW_L2_FEATURES 29
+#define KBASE_GPUPROP_RAW_SUSPEND_SIZE 30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES 31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES 32
+#define KBASE_GPUPROP_RAW_AS_PRESENT 33
+#define KBASE_GPUPROP_RAW_JS_PRESENT 34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES 51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54
+#define KBASE_GPUPROP_RAW_GPU_ID 55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY 63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0 64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1 65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2 66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3 67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4 68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5 69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6 70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7 71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8 72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9 73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10 74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11 75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12 76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13 77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_jd.c b/drivers/gpu/arm_gpu/mali_kbase_jd.c
new file mode 100644
index 000000000000..15ed06132eeb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_jd.c
@@ -0,0 +1,1847 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <mali_kbase.h>
+#include <mali_kbase_uku.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/ratelimit.h>
+
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_tlstream.h>
+
+#include "mali_kbase_dma_fence.h"
+
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+/* random32 was renamed to prandom_u32 in 3.8 */
+#define prandom_u32 random32
+#endif
+
+/* Return whether katom will run on the GPU or not. Currently only soft jobs and
+ * dependency-only atoms do not run on the GPU */
+#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) || \
+ ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == \
+ BASE_JD_REQ_DEP)))
+/*
+ * This is the kernel side of the API. The only entry points are:
+ * - kbase_jd_submit(): Called from userspace to submit a single bag
+ * - kbase_jd_done(): Called from interrupt context to track the
+ * completion of a job.
+ * Callouts:
+ * - to the job manager (enqueue a job)
+ * - to the event subsystem (signals the completion/failure of bag/job-chains).
+ */
+
+static void __user *
+get_compat_pointer(struct kbase_context *kctx, const u64 p)
+{
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return compat_ptr(p);
+#endif
+ return u64_to_user_ptr(p);
+}
+
+/* Runs an atom, either by handing it to the JS or by immediately running it in the case of soft-jobs
+ *
+ * Returns whether the JS needs a reschedule.
+ *
+ * Note that the caller must also check the atom status and,
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED, must call jd_done_nolock
+ */
+static int jd_run_atom(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+ /* Dependency only atom */
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* Soft-job */
+ if (katom->will_fail_event_code) {
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ }
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (!kbase_replay_process(katom))
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ } else if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+ return 0;
+ }
+
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ /* Queue an action about whether we should try scheduling a context */
+ return kbasep_js_add_job(kctx, katom);
+}
+
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
+{
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(katom);
+ kbdev = katom->kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Check whether the atom's other dependencies were already met. If
+ * katom is a GPU atom then the job scheduler may be able to represent
+ * the dependencies, hence we may attempt to submit it before they are
+ * met. Other atoms must have had both dependencies resolved.
+ */
+ if (IS_GPU_ATOM(katom) ||
+ (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+ !kbase_jd_katom_dep_atom(&katom->dep[1]))) {
+ /* katom dep complete, attempt to run it */
+ bool resched = false;
+
+ resched = jd_run_atom(katom);
+
+ if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ /* The atom has already finished */
+ resched |= jd_done_nolock(katom, NULL);
+ }
+
+ if (resched)
+ kbase_js_sched_all(kbdev);
+ }
+}
+#endif
+
+#ifdef CONFIG_KDS
+
+/* Add the katom to the kds waiting list.
+ * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx;
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ list_add_tail(&katom->node, &kctx->waiting_kds_resource);
+}
+
+/* Remove the katom from the kds waiting list.
+ * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
+ * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ list_del(&katom->node);
+}
+
+static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_context *ctx;
+
+ katom = (struct kbase_jd_atom *)callback_parameter;
+ KBASE_DEBUG_ASSERT(katom);
+
+ ctx = &katom->kctx->jctx;
+
+ /* If KDS resource has already been satisfied (e.g. due to zapping)
+ * do nothing.
+ */
+ mutex_lock(&ctx->lock);
+ if (!katom->kds_dep_satisfied) {
+ katom->kds_dep_satisfied = true;
+ kbase_jd_dep_clear_locked(katom);
+ }
+ mutex_unlock(&ctx->lock);
+}
+
+static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* Prevent job_done_nolock from being called twice on an atom when
+ * there is a race between job completion and cancellation */
+
+ if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+ /* Wait was cancelled - zap the atom */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+ }
+}
+#endif /* CONFIG_KDS */
+
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
+{
+#ifdef CONFIG_KDS
+ if (katom->kds_rset) {
+ struct kbase_jd_context *jctx = &katom->kctx->jctx;
+
+ /*
+ * As the atom is no longer waiting, remove it from
+ * the waiting list.
+ */
+
+ mutex_lock(&jctx->lock);
+ kbase_jd_kds_waiters_remove(katom);
+ mutex_unlock(&jctx->lock);
+
+ /* Release the kds resource or cancel if zapping */
+ kds_resource_set_release_sync(&katom->kds_rset);
+ }
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+	 * Any successfully completed atom would have had all its callbacks
+ * completed before the atom was run, so only flush for failed atoms.
+ */
+ if (katom->event_code != BASE_JD_EVENT_DONE)
+ flush_workqueue(katom->kctx->dma_fence.wq);
+#endif /* CONFIG_MALI_DMA_FENCE */
+}
+
+static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+#ifdef CONFIG_KDS
+ /* Prevent the KDS resource from triggering the atom in case of zapping */
+ if (katom->kds_rset)
+ katom->kds_dep_satisfied = true;
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_signal(katom);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ kbase_gpu_vm_lock(katom->kctx);
+ /* only roll back if extres is non-NULL */
+ if (katom->extres) {
+ u32 res_no;
+
+ res_no = katom->nr_extres;
+ while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+ struct kbase_va_region *reg;
+
+ reg = kbase_region_tracker_find_region_base_address(
+ katom->kctx,
+ katom->extres[res_no].gpu_address);
+ kbase_unmap_external_resource(katom->kctx, reg, alloc);
+ }
+ kfree(katom->extres);
+ katom->extres = NULL;
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+}
+
+/*
+ * Set up external resources needed by this job.
+ *
+ * jctx.lock must be held when this is called.
+ */
+
+static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
+{
+ int err_ret_val = -EINVAL;
+ u32 res_no;
+#ifdef CONFIG_KDS
+ u32 kds_res_count = 0;
+ struct kds_resource **kds_resources = NULL;
+ unsigned long *kds_access_bitmap = NULL;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ struct kbase_dma_fence_resv_info info = {
+ .dma_fence_resv_count = 0,
+ };
+#ifdef CONFIG_SYNC
+ /*
+	 * When both dma-buf fence and Android native sync are enabled, we
+ * disable dma-buf fence for contexts that are using Android native
+ * fences.
+ */
+ const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
+ KCTX_NO_IMPLICIT_SYNC);
+#else /* CONFIG_SYNC */
+ const bool implicit_sync = true;
+#endif /* CONFIG_SYNC */
+#endif /* CONFIG_MALI_DMA_FENCE */
+ struct base_external_resource *input_extres;
+
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+ /* no resources encoded, early out */
+ if (!katom->nr_extres)
+ return -EINVAL;
+
+ katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
+ if (NULL == katom->extres) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ /* copy user buffer to the end of our real buffer.
+ * Make sure the struct sizes haven't changed in a way
+ * we don't support */
+ BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
+ input_extres = (struct base_external_resource *)
+ (((unsigned char *)katom->extres) +
+ (sizeof(*katom->extres) - sizeof(*input_extres)) *
+ katom->nr_extres);
+
+ if (copy_from_user(input_extres,
+ get_compat_pointer(katom->kctx, user_atom->extres_list),
+ sizeof(*input_extres) * katom->nr_extres) != 0) {
+ err_ret_val = -EINVAL;
+ goto early_err_out;
+ }
+#ifdef CONFIG_KDS
+ /* assume we have to wait for all */
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
+
+ if (!kds_resources) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_access_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!kds_access_bitmap) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (implicit_sync) {
+ info.resv_objs = kmalloc_array(katom->nr_extres,
+ sizeof(struct reservation_object *),
+ GFP_KERNEL);
+ if (!info.resv_objs) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ info.dma_fence_excl_bitmap =
+ kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!info.dma_fence_excl_bitmap) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
+
+ /* need to keep the GPU VM locked while we set up UMM buffers */
+ kbase_gpu_vm_lock(katom->kctx);
+ for (res_no = 0; res_no < katom->nr_extres; res_no++) {
+ struct base_external_resource *res;
+ struct kbase_va_region *reg;
+ struct kbase_mem_phy_alloc *alloc;
+ bool exclusive;
+
+ res = &input_extres[res_no];
+ exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
+ ? true : false;
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ katom->kctx,
+ res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ /* did we find a matching region object? */
+ if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
+ /* roll back */
+ goto failed_loop;
+ }
+
+ if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
+ (reg->flags & KBASE_REG_SECURE)) {
+ katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
+ }
+
+ alloc = kbase_map_external_resource(katom->kctx, reg,
+ current->mm
+#ifdef CONFIG_KDS
+ , &kds_res_count, kds_resources,
+ kds_access_bitmap, exclusive
+#endif
+ );
+ if (!alloc) {
+ err_ret_val = -EINVAL;
+ goto failed_loop;
+ }
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (implicit_sync &&
+ reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ struct reservation_object *resv;
+
+ resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
+ if (resv)
+ kbase_dma_fence_add_reservation(resv, &info,
+ exclusive);
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+		/* finish by updating our output array with the data we found */
+		/* NOTE: It is important that this is the last thing we do (or
+		 * at least not before the first write) as we overwrite elements
+		 * as we loop and could be overwriting our own input, so no
+		 * writes until the last read for an element.
+		 */
+ katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
+ katom->extres[res_no].alloc = alloc;
+ }
+ /* successfully parsed the extres array */
+ /* drop the vm lock before we call into kds */
+ kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
+#ifdef CONFIG_KDS
+ if (kds_res_count) {
+ int wait_failed;
+
+ /* We have resources to wait for with kds */
+ katom->kds_dep_satisfied = false;
+
+ wait_failed = kds_async_waitall(&katom->kds_rset,
+ &katom->kctx->jctx.kds_cb, katom, NULL,
+ kds_res_count, kds_access_bitmap,
+ kds_resources);
+
+ if (wait_failed)
+ goto failed_kds_setup;
+ else
+ kbase_jd_kds_waiters_add(katom);
+ } else {
+ /* Nothing to wait for, so kds dep met */
+ katom->kds_dep_satisfied = true;
+ }
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (implicit_sync) {
+ if (info.dma_fence_resv_count) {
+ int ret;
+
+ ret = kbase_dma_fence_wait(katom, &info);
+ if (ret < 0)
+ goto failed_dma_fence_setup;
+ }
+
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ /* all done OK */
+ return 0;
+
+/* error handling section */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+failed_dma_fence_setup:
+#ifdef CONFIG_KDS
+ /* If we are here, dma_fence setup failed but KDS didn't.
+ * Revert KDS setup if any.
+ */
+ if (kds_res_count) {
+ mutex_unlock(&katom->kctx->jctx.lock);
+ kds_resource_set_release_sync(&katom->kds_rset);
+ mutex_lock(&katom->kctx->jctx.lock);
+
+ kbase_jd_kds_waiters_remove(katom);
+ katom->kds_dep_satisfied = true;
+ }
+#endif /* CONFIG_KDS */
+#endif /* CONFIG_MALI_DMA_FENCE */
+#ifdef CONFIG_KDS
+failed_kds_setup:
+#endif
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+	/* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
+
+ /* lock before we unmap */
+ kbase_gpu_vm_lock(katom->kctx);
+#endif
+
+ failed_loop:
+ /* undo the loop work */
+ while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+
+ kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
+ early_err_out:
+ kfree(katom->extres);
+ katom->extres = NULL;
+#ifdef CONFIG_KDS
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (implicit_sync) {
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+ }
+#endif
+ return err_ret_val;
+}
+
+static inline void jd_resolve_dep(struct list_head *out_list,
+ struct kbase_jd_atom *katom,
+ u8 d, bool ctx_is_dying)
+{
+ u8 other_d = !d;
+
+ while (!list_empty(&katom->dep_head[d])) {
+ struct kbase_jd_atom *dep_atom;
+ struct kbase_jd_atom *other_dep_atom;
+ u8 dep_type;
+
+ dep_atom = list_entry(katom->dep_head[d].next,
+ struct kbase_jd_atom, dep_item[d]);
+ list_del(katom->dep_head[d].next);
+
+ dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
+ kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+
+ if (katom->event_code != BASE_JD_EVENT_DONE &&
+ (dep_type != BASE_JD_DEP_TYPE_ORDER)) {
+#ifdef CONFIG_KDS
+ if (!dep_atom->kds_dep_satisfied) {
+ /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
+ * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
+ */
+ dep_atom->kds_dep_satisfied = true;
+ }
+#endif
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_cancel_callbacks(dep_atom);
+#endif
+
+ dep_atom->event_code = katom->event_code;
+ KBASE_DEBUG_ASSERT(dep_atom->status !=
+ KBASE_JD_ATOM_STATE_UNUSED);
+
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY)
+ != BASE_JD_REQ_SOFT_REPLAY) {
+ dep_atom->will_fail_event_code =
+ dep_atom->event_code;
+ } else {
+ dep_atom->status =
+ KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+ }
+ other_dep_atom = (struct kbase_jd_atom *)
+ kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
+
+ if (!dep_atom->in_jd_list && (!other_dep_atom ||
+ (IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
+ !dep_atom->will_fail_event_code &&
+ !other_dep_atom->will_fail_event_code))) {
+ bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+ int dep_count;
+
+ dep_count = kbase_fence_dep_count_read(dep_atom);
+ if (likely(dep_count == -1)) {
+ dep_satisfied = true;
+ } else {
+ /*
+ * There are either still active callbacks, or
+				 * all fences for this @dep_atom have signaled,
+ * but the worker that will queue the atom has
+ * not yet run.
+ *
+ * Wait for the fences to signal and the fence
+ * worker to run and handle @dep_atom. If
+ * @dep_atom was completed due to error on
+ * @katom, then the fence worker will pick up
+ * the complete status and error code set on
+ * @dep_atom above.
+ */
+ dep_satisfied = false;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+#ifdef CONFIG_KDS
+ dep_satisfied = dep_satisfied && dep_atom->kds_dep_satisfied;
+#endif
+
+ if (dep_satisfied) {
+ dep_atom->in_jd_list = true;
+ list_add_tail(&dep_atom->jd_item, out_list);
+ }
+ }
+ }
+}
+
+KBASE_EXPORT_TEST_API(jd_resolve_dep);
+
+#if MALI_CUSTOMER_RELEASE == 0
+static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+ kbdev->force_replay_count++;
+
+ if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
+ kbdev->force_replay_count = 0;
+ katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;
+
+ if (kbdev->force_replay_random)
+ kbdev->force_replay_limit =
+ (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1;
+
+ dev_info(kbdev->dev, "force_replay : promoting to error\n");
+ }
+}
+
+/** Test to see if atom should be forced to fail.
+ *
+ * This function will check if an atom has a replay job as a dependent. If so
+ * then it will be considered for forced failure. */
+static void jd_check_force_failure(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ int i;
+
+ if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
+ (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
+ return;
+
+ for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
+ if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
+ kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
+ struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
+
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_REPLAY &&
+ (dep_atom->core_req & kbdev->force_replay_core_req)
+ == kbdev->force_replay_core_req) {
+ jd_force_failure(kbdev, katom);
+ return;
+ }
+ }
+ }
+}
+#endif
+
+/**
+ * is_dep_valid - Validate that a dependency is valid for early dependency
+ * submission
+ * @katom: Dependency atom to validate
+ *
+ * A dependency is valid if any of the following are true :
+ * - It does not exist (a non-existent dependency does not block submission)
+ * - It is in the job scheduler
+ * - It has completed, does not have a failure event code, and has not been
+ * marked to fail in the future
+ *
+ * Return: true if valid, false otherwise
+ */
+static bool is_dep_valid(struct kbase_jd_atom *katom)
+{
+ /* If there's no dependency then this is 'valid' from the perspective of
+ * early dependency submission */
+ if (!katom)
+ return true;
+
+ /* Dependency must have reached the job scheduler */
+ if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
+ return false;
+
+ /* If dependency has completed and has failed or will fail then it is
+ * not valid */
+ if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
+ (katom->event_code != BASE_JD_EVENT_DONE ||
+ katom->will_fail_event_code))
+ return false;
+
+ return true;
+}
+
+static void jd_try_submitting_deps(struct list_head *out_list,
+ struct kbase_jd_atom *node)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct list_head *pos;
+
+ list_for_each(pos, &node->dep_head[i]) {
+ struct kbase_jd_atom *dep_atom = list_entry(pos,
+ struct kbase_jd_atom, dep_item[i]);
+
+ if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
+				/* Check if atom deps look sane */
+ bool dep0_valid = is_dep_valid(
+ dep_atom->dep[0].atom);
+ bool dep1_valid = is_dep_valid(
+ dep_atom->dep[1].atom);
+ bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+ int dep_count;
+
+ dep_count = kbase_fence_dep_count_read(
+ dep_atom);
+ if (likely(dep_count == -1)) {
+ dep_satisfied = true;
+ } else {
+ /*
+ * There are either still active callbacks, or
+					 * all fences for this @dep_atom have signaled,
+ * but the worker that will queue the atom has
+ * not yet run.
+ *
+ * Wait for the fences to signal and the fence
+ * worker to run and handle @dep_atom. If
+ * @dep_atom was completed due to error on
+ * @katom, then the fence worker will pick up
+ * the complete status and error code set on
+ * @dep_atom above.
+ */
+ dep_satisfied = false;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+#ifdef CONFIG_KDS
+ dep_satisfied = dep_satisfied &&
+ dep_atom->kds_dep_satisfied;
+#endif
+
+ if (dep0_valid && dep1_valid && dep_satisfied) {
+ dep_atom->in_jd_list = true;
+ list_add(&dep_atom->jd_item, out_list);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Perform the necessary handling of an atom that has finished running
+ * on the GPU.
+ *
+ * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
+ * is responsible for calling kbase_finish_soft_job *before* calling this function.
+ *
+ * The caller must hold the kbase_jd_context.lock.
+ */
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+ struct list_head *completed_jobs_ctx)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct list_head completed_jobs;
+ struct list_head runnable_jobs;
+ bool need_to_try_schedule_context = false;
+ int i;
+
+ INIT_LIST_HEAD(&completed_jobs);
+ INIT_LIST_HEAD(&runnable_jobs);
+
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+#if MALI_CUSTOMER_RELEASE == 0
+ jd_check_force_failure(katom);
+#endif
+
+	/* This is needed in case an atom is failed due to being invalid; this
+ * can happen *before* the jobs that the atom depends on have completed */
+ for (i = 0; i < 2; i++) {
+ if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
+ list_del(&katom->dep_item[i]);
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+ }
+ }
+
+ /* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with
+ * BASE_JD_EVENT_TILE_RANGE_FAULT.
+ *
+	 * So here, if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, we promote the
+	 * error code to BASE_JD_EVENT_DONE.
+ */
+
+ if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
+ if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
+ /* Promote the failure to job done */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
+ }
+ }
+
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ list_add_tail(&katom->jd_item, &completed_jobs);
+
+ while (!list_empty(&completed_jobs)) {
+ katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
+ list_del(completed_jobs.prev);
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ for (i = 0; i < 2; i++)
+ jd_resolve_dep(&runnable_jobs, katom, i,
+ kbase_ctx_flag(kctx, KCTX_DYING));
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_post_external_resources(katom);
+
+ while (!list_empty(&runnable_jobs)) {
+ struct kbase_jd_atom *node;
+
+ node = list_entry(runnable_jobs.next,
+ struct kbase_jd_atom, jd_item);
+ list_del(runnable_jobs.next);
+ node->in_jd_list = false;
+
+ KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ need_to_try_schedule_context |= jd_run_atom(node);
+ } else {
+ node->event_code = katom->event_code;
+
+ if ((node->core_req &
+ BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(node))
+ /* Don't complete this atom */
+ continue;
+ } else if (node->core_req &
+ BASE_JD_REQ_SOFT_JOB) {
+ /* If this is a fence wait soft job
+ * then remove it from the list of sync
+ * waiters.
+ */
+ if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
+ kbasep_remove_waiting_soft_job(node);
+
+ kbase_finish_soft_job(node);
+ }
+ node->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+
+ if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ list_add_tail(&node->jd_item, &completed_jobs);
+ } else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
+ !node->will_fail_event_code) {
+ /* Node successfully submitted, try submitting
+ * dependencies as they may now be representable
+ * in JS */
+ jd_try_submitting_deps(&runnable_jobs, node);
+ }
+ }
+
+ /* Register a completed job as a disjoint event when the GPU
+ * is in a disjoint state (ie. being reset or replaying jobs).
+ */
+ kbase_disjoint_event_potential(kctx->kbdev);
+ if (completed_jobs_ctx)
+ list_add_tail(&katom->jd_item, completed_jobs_ctx);
+ else
+ kbase_event_post(kctx, katom);
+
+ /* Decrement and check the TOTAL number of jobs. This includes
+ * those not tracked by the scheduler: 'not ready to run' and
+ * 'dependency-only' jobs. */
+ if (--kctx->jctx.job_nr == 0)
+ wake_up(&kctx->jctx.zero_jobs_wait); /* All events are safely queued now, and we can signal any waiter
+ * that we've got no more jobs (so we can be safely terminated) */
+ }
+
+ return need_to_try_schedule_context;
+}
+
+KBASE_EXPORT_TEST_API(jd_done_nolock);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+enum {
+ CORE_REQ_DEP_ONLY,
+ CORE_REQ_SOFT,
+ CORE_REQ_COMPUTE,
+ CORE_REQ_FRAGMENT,
+ CORE_REQ_VERTEX,
+ CORE_REQ_TILER,
+ CORE_REQ_FRAGMENT_VERTEX,
+ CORE_REQ_FRAGMENT_VERTEX_TILER,
+ CORE_REQ_FRAGMENT_TILER,
+ CORE_REQ_VERTEX_TILER,
+ CORE_REQ_UNKNOWN
+};
+static const char * const core_req_strings[] = {
+ "Dependency Only Job",
+ "Soft Job",
+ "Compute Shader Job",
+ "Fragment Shader Job",
+ "Vertex/Geometry Shader Job",
+ "Tiler Job",
+ "Fragment Shader + Vertex/Geometry Shader Job",
+ "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
+ "Fragment Shader + Tiler Job",
+ "Vertex/Geometry Shader Job + Tiler Job",
+ "Unknown Job"
+};
+static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
+{
+ if (core_req & BASE_JD_REQ_SOFT_JOB)
+ return core_req_strings[CORE_REQ_SOFT];
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ return core_req_strings[CORE_REQ_COMPUTE];
+ switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
+ case BASE_JD_REQ_DEP:
+ return core_req_strings[CORE_REQ_DEP_ONLY];
+ case BASE_JD_REQ_FS:
+ return core_req_strings[CORE_REQ_FRAGMENT];
+ case BASE_JD_REQ_CS:
+ return core_req_strings[CORE_REQ_VERTEX];
+ case BASE_JD_REQ_T:
+ return core_req_strings[CORE_REQ_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_TILER];
+ case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_VERTEX_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
+ }
+ return core_req_strings[CORE_REQ_UNKNOWN];
+}
+#endif
+
+bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int queued = 0;
+ int i;
+ int sched_prio;
+ bool ret;
+ bool will_fail = false;
+
+ /* Update the TOTAL number of jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ jctx->job_nr++;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ katom->start_timestamp.tv64 = 0;
+#else
+ katom->start_timestamp = 0;
+#endif
+ katom->udata = user_atom->udata;
+ katom->kctx = kctx;
+ katom->nr_extres = user_atom->nr_extres;
+ katom->extres = NULL;
+ katom->device_nr = user_atom->device_nr;
+ katom->affinity = 0;
+ katom->jc = user_atom->jc;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->core_req = user_atom->core_req;
+ katom->atom_flags = 0;
+ katom->retry_count = 0;
+ katom->need_cache_flush_cores_retained = 0;
+ katom->pre_dep = NULL;
+ katom->post_dep = NULL;
+ katom->x_pre_dep = NULL;
+ katom->x_post_dep = NULL;
+ katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
+
+ /* Implicitly sets katom->protected_state.enter as well. */
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+ katom->age = kctx->age_count++;
+
+ INIT_LIST_HEAD(&katom->jd_item);
+#ifdef CONFIG_KDS
+ /* Start by assuming that the KDS dependencies are satisfied;
+ * kbase_jd_pre_external_resources will correct this if there are
+ * dependencies */
+ katom->kds_dep_satisfied = true;
+ katom->kds_rset = NULL;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_fence_dep_count_set(katom, -1);
+#endif
+
+ /* Don't do anything if the dependency configuration is invalid.
+ * Both dependencies are checked in a separate pass first; otherwise we
+ * would have the extra complexity of undoing the 1st dependency (already
+ * added to the list) when only the 2nd one has an invalid configuration.
+ */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+ if (dep_atom_number) {
+ if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+ dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
+ katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+ /* Wrong dependency setup. Atom will be sent
+ * back to user space. Do not record any
+ * dependencies. */
+ KBASE_TLSTREAM_TL_NEW_ATOM(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ KBASE_TLSTREAM_TL_RET_ATOM_CTX(
+ katom, kctx);
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom,
+ TL_ATOM_STATE_IDLE);
+
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+ }
+
+ /* Add dependencies */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type;
+ struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+
+ dep_atom_type = user_atom->pre_dep[i].dependency_type;
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+
+ if (!dep_atom_number)
+ continue;
+
+ if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
+ dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+
+ if (dep_atom->event_code == BASE_JD_EVENT_DONE)
+ continue;
+ /* don't stop this atom if it has an order dependency
+ * only to the failed one, try to submit it through
+ * the normal path
+ */
+ if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+ dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+ continue;
+ }
+
+ /* Atom has completed, propagate the error code if any */
+ katom->event_code = dep_atom->event_code;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+ /* This atom is going through soft replay or
+ * will be sent back to user space. Do not record any
+ * dependencies. */
+ KBASE_TLSTREAM_TL_NEW_ATOM(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx);
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom,
+ TL_ATOM_STATE_IDLE);
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(katom)) {
+ ret = false;
+ goto out;
+ }
+ }
+ will_fail = true;
+
+ } else {
+ /* Atom is in progress, add this atom to the list */
+ list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
+ kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
+ queued = 1;
+ }
+ }
+
+ if (will_fail) {
+ if (!queued) {
+ ret = jd_done_nolock(katom, NULL);
+
+ goto out;
+ } else {
+ katom->will_fail_event_code = katom->event_code;
+ ret = false;
+
+ goto out;
+ }
+ } else {
+ /* These must occur after the above loop to ensure that an atom
+ * that depends on a previous atom with the same number behaves
+ * as expected */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ }
+
+ /* For invalid priority, be most lenient and choose the default */
+ sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
+ if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+ sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
+ katom->sched_priority = sched_prio;
+
+ /* Create a new atom recording all dependencies it was set up with. */
+ KBASE_TLSTREAM_TL_NEW_ATOM(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_IDLE);
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(katom, katom->sched_priority);
+ KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx);
+ for (i = 0; i < 2; i++)
+ if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
+ &katom->dep[i])) {
+ KBASE_TLSTREAM_TL_DEP_ATOM_ATOM(
+ (void *)kbase_jd_katom_dep_atom(
+ &katom->dep[i]),
+ (void *)katom);
+ } else if (BASE_JD_DEP_TYPE_INVALID !=
+ user_atom->pre_dep[i].dependency_type) {
+ /* Resolved dependency. */
+ int dep_atom_number =
+ user_atom->pre_dep[i].atom_id;
+ struct kbase_jd_atom *dep_atom =
+ &jctx->atoms[dep_atom_number];
+
+ KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM(
+ (void *)dep_atom,
+ (void *)katom);
+ }
+
+ /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
+ if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ /* Reject atoms with an invalid device_nr */
+ if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
+ (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
+ dev_warn(kctx->kbdev->dev,
+ "Rejecting atom with invalid device_nr %d",
+ katom->device_nr);
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ /* Reject atoms with invalid core requirements */
+ if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+ (katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
+ dev_warn(kctx->kbdev->dev,
+ "Rejecting atom with invalid core requirements");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ /* handle what we need to do to access the external resources */
+ if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
+ /* setup failed (no access, bad resource, unknown resource types, etc.) */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+
+ /* Validate the atom. The function returns an error if the atom is
+ * malformed.
+ *
+ * Soft-jobs never enter the job scheduler; they have their own
+ * initialisation method instead.
+ *
+ * If either step fails, the atom is immediately completed with an error.
+ */
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
+ if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ } else {
+ /* Soft-job */
+ if (kbase_prepare_soft_job(katom) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+ katom->work_id = atomic_inc_return(&jctx->work_id);
+ trace_gpu_job_enqueue((u32)kctx->id, katom->work_id,
+ kbasep_map_core_reqs_to_string(katom->core_req));
+#endif
+
+ if (queued && !IS_GPU_ATOM(katom)) {
+ ret = false;
+ goto out;
+ }
+#ifdef CONFIG_KDS
+ if (!katom->kds_dep_satisfied) {
+ /* Queue atom due to KDS dependency */
+ ret = false;
+ goto out;
+ }
+#endif /* CONFIG_KDS */
+
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (kbase_fence_dep_count_read(katom) != -1) {
+ ret = false;
+ goto out;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(katom))
+ ret = false;
+ else
+ ret = jd_done_nolock(katom, NULL);
+
+ goto out;
+ } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ ret = false;
+ } else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ ret = kbasep_js_add_job(kctx, katom);
+ /* If job was cancelled then resolve immediately */
+ if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
+ ret = jd_done_nolock(katom, NULL);
+ } else {
+ /* This is a pure dependency. Resolve it immediately */
+ ret = jd_done_nolock(katom, NULL);
+ }
+
+ out:
+ return ret;
+}
+
+int kbase_jd_submit(struct kbase_context *kctx,
+ void __user *user_addr, u32 nr_atoms, u32 stride,
+ bool uk6_atom)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int err = 0;
+ int i;
+ bool need_to_try_schedule_context = false;
+ struct kbase_device *kbdev;
+ u32 latest_flush;
+
+ /*
+ * kbase_jd_submit() isn't expected to fail, so any error with an
+ * individual job is reported by immediately failing that job (through
+ * the event system)
+ */
+ kbdev = kctx->kbdev;
+
+ beenthere(kctx, "%s", "Enter");
+
+ if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
+ return -EINVAL;
+ }
+
+ if (stride != sizeof(base_jd_atom_v2)) {
+ dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
+ return -EINVAL;
+ }
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(nr_atoms,
+ &kctx->timeline.jd_atoms_in_flight));
+
+ /* All atoms submitted in this call have the same flush ID */
+ latest_flush = kbase_backend_get_current_flush_id(kbdev);
+
+ for (i = 0; i < nr_atoms; i++) {
+ struct base_jd_atom_v2 user_atom;
+ struct kbase_jd_atom *katom;
+
+ if (copy_from_user(&user_atom, user_addr,
+ sizeof(user_atom)) != 0) {
+ err = -EINVAL;
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
+ atomic_sub_return(nr_atoms - i,
+ &kctx->timeline.jd_atoms_in_flight));
+ break;
+ }
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+ if (KBASE_API_VERSION(10, 3) > kctx->api_version)
+ user_atom.core_req = (u32)(user_atom.compat_core_req
+ & 0x7fff);
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+ user_addr = (void __user *)((uintptr_t) user_addr + stride);
+
+ mutex_lock(&jctx->lock);
+#ifndef compiletime_assert
+#define compiletime_assert_defined
+#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
+while (false)
+#endif
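+ /* The compiletime_assert() fallback above is the classic duplicate-case
+ * trick: if the asserted expression evaluates to 0, "case 0:" and
+ * "case (x):" collapse to the same label and the build fails. */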
+ compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
+ BASE_JD_ATOM_COUNT,
+ "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+ compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
+ sizeof(user_atom.atom_number),
+ "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+#ifdef compiletime_assert_defined
+#undef compiletime_assert
+#undef compiletime_assert_defined
+#endif
+ katom = &jctx->atoms[user_atom.atom_number];
+
+ /* Record the flush ID for the cache flush optimisation */
+ katom->flush_id = latest_flush;
+
+ while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
+ /* Atom number is already in use, wait for the atom to
+ * complete
+ */
+ mutex_unlock(&jctx->lock);
+
+ /* This thread will wait for the atom to complete. Due
+ * to thread scheduling we are not sure that the other
+ * thread that owns the atom will also schedule the
+ * context, so we force the scheduler to be active and
+ * hence eventually schedule this context at some point
+ * later.
+ */
+ kbase_js_sched_all(kbdev);
+
+ if (wait_event_killable(katom->completed,
+ katom->status ==
+ KBASE_JD_ATOM_STATE_UNUSED) != 0) {
+ /* We're being killed so the result code
+ * doesn't really matter
+ */
+ return 0;
+ }
+ mutex_lock(&jctx->lock);
+ }
+
+ need_to_try_schedule_context |=
+ jd_submit_atom(kctx, &user_atom, katom);
+
+ /* Register a completed job as a disjoint event when the GPU is in a disjoint state
+ * (ie. being reset or replaying jobs).
+ */
+ kbase_disjoint_event_potential(kbdev);
+
+ mutex_unlock(&jctx->lock);
+ }
+
+ if (need_to_try_schedule_context)
+ kbase_js_sched_all(kbdev);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_submit);
+
+void kbase_jd_done_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+ struct kbase_jd_context *jctx;
+ struct kbase_context *kctx;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ u64 cache_jc = katom->jc;
+ struct kbasep_js_atom_retained_state katom_retained_state;
+ bool context_idle;
+ base_jd_core_req core_req = katom->core_req;
+ u64 affinity = katom->affinity;
+ enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ jctx = &kctx->jctx;
+ kbdev = kctx->kbdev;
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+
+ kbase_backend_complete_wq(kbdev, katom);
+
+ /*
+ * Begin transaction on JD context and JS context
+ */
+ mutex_lock(&jctx->lock);
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_DONE);
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* This worker only gets called on contexts that are scheduled *in*. This is
+ * because it only happens in response to an IRQ from a job that was
+ * running.
+ */
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (katom->event_code == BASE_JD_EVENT_STOPPED) {
+ /* Atom has been promoted to stopped */
+ unsigned long flags;
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ kbase_js_unpull(kctx, katom);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&jctx->lock);
+
+ return;
+ }
+
+ if (katom->event_code != BASE_JD_EVENT_DONE)
+ dev_err(kbdev->dev,
+ "t6xx: GPU fault 0x%02lx from job slot %d\n",
+ (unsigned long)katom->event_code,
+ katom->slot_nr);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+ /* Retain state before the katom disappears */
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+
+ context_idle = kbase_js_complete_atom_wq(kctx, katom);
+
+ KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
+
+ kbasep_js_remove_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+ /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+ jd_done_nolock(katom, &kctx->completed_jobs);
+
+ /* katom may have been freed now, do not use! */
+
+ if (context_idle) {
+ unsigned long flags;
+
+ context_idle = false;
+ mutex_lock(&js_devdata->queue_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* If kbase_sched() has scheduled this context back in then
+ * KCTX_ACTIVE will have been set after we marked it as
+ * inactive, and another pm reference will have been taken, so
+ * drop our reference. But do not call kbase_jm_idle_ctx(), as
+ * the context is active and fast-starting is allowed.
+ *
+ * If an atom has been fast-started then kctx->atoms_pulled will
+ * be non-zero but KCTX_ACTIVE will still be false (as the
+ * previous pm reference has been inherited). Do NOT drop our
+ * reference, as it has been re-used, and leave the context as
+ * active.
+ *
+ * If no new atoms have been started then KCTX_ACTIVE will still
+ * be false and atoms_pulled will be zero, so drop the reference
+ * and call kbase_jm_idle_ctx().
+ *
+ * As the checks are done under both the queue_mutex and
+ * hwaccess_lock, it should be impossible for this to race
+ * with the scheduler code.
+ */
+ if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
+ !atomic_read(&kctx->atoms_pulled)) {
+ /* Calling kbase_jm_idle_ctx() here will ensure that
+ * atoms are not fast-started when we drop the
+ * hwaccess_lock. This is not performed if
+ * KCTX_ACTIVE is set as in that case another pm
+ * reference has been taken and a fast-start would be
+ * valid.
+ */
+ if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
+ kbase_jm_idle_ctx(kbdev, kctx);
+ context_idle = true;
+ } else {
+ kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->queue_mutex);
+ }
+
+ /*
+ * Transaction complete
+ */
+ mutex_unlock(&jctx->lock);
+
+ /* The job is no longer running, so we can now safely release the context
+ * reference and handle any actions that were logged against the atom's
+ * retained state */
+
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+
+ kbase_js_sched_all(kbdev);
+
+ if (!atomic_dec_return(&kctx->work_count)) {
+ /* If worker now idle then post all events that jd_done_nolock()
+ * has queued */
+ mutex_lock(&jctx->lock);
+ while (!list_empty(&kctx->completed_jobs)) {
+ struct kbase_jd_atom *atom = list_entry(
+ kctx->completed_jobs.next,
+ struct kbase_jd_atom, jd_item);
+ list_del(kctx->completed_jobs.next);
+
+ kbase_event_post(kctx, atom);
+ }
+ mutex_unlock(&jctx->lock);
+ }
+
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
+ coreref_state);
+
+ if (context_idle)
+ kbase_pm_context_idle(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
+}
+
+/**
+ * jd_cancel_worker - Work queue job cancel function.
+ * @data: a &struct work_struct
+ *
+ * Only called as part of 'Zapping' a context (which occurs on termination).
+ * Operates serially with the kbase_jd_done_worker() on the work queue.
+ *
+ * This can only be called on contexts that aren't scheduled.
+ *
+ * We don't need to release most of the resources that kbase_jd_done() or
+ * kbase_jd_done_worker() would, because the atoms here must not be
+ * running (by virtue of this only being called on contexts that aren't
+ * scheduled).
+ */
+static void jd_cancel_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+ struct kbase_jd_context *jctx;
+ struct kbase_context *kctx;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool need_to_try_schedule_context;
+ bool attr_state_changed;
+ struct kbase_device *kbdev;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ kbdev = kctx->kbdev;
+ jctx = &kctx->jctx;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
+
+ /* This only gets called on contexts that are scheduled out. Hence, we must
+ * not decrement the count of running jobs (there aren't any), nor try to
+ * schedule out the context (it's already scheduled out).
+ */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ /* Scheduler: Remove the job from the system */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&jctx->lock);
+
+ need_to_try_schedule_context = jd_done_nolock(katom, NULL);
+ /* Because we're zapping, no more jobs are being added to this ctx, so
+ * there is no need to schedule the context. There is also no need for the
+ * jsctx_mutex to have been taken around this call. */
+ KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
+
+ /* katom may have been freed now, do not use! */
+ mutex_unlock(&jctx->lock);
+
+ if (attr_state_changed)
+ kbase_js_sched_all(kbdev);
+}
+
+/**
+ * kbase_jd_done - Complete a job that has been removed from the Hardware
+ * @katom: atom which has been completed
+ * @slot_nr: slot the atom was on
+ * @end_timestamp: completion time
+ * @done_code: completion code
+ *
+ * This must be used whenever a job has been removed from the Hardware, e.g.:
+ * An IRQ indicates that the job finished (for both error and 'done' codes), or
+ * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
+ *
+ * Some work is carried out immediately, and the rest is deferred onto a
+ * workqueue
+ *
+ * Context:
+ * This can be called safely from atomic context.
+ * The caller must hold kbdev->hwaccess_lock
+ */
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
+ ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
+{
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
+ katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
+
+ kbase_job_check_leave_disjoint(kbdev, katom);
+
+ katom->slot_nr = slot_nr;
+
+ atomic_inc(&kctx->work_count);
+
+#ifdef CONFIG_DEBUG_FS
+ /* A failed job happened and is waiting to be dumped */
+ if (!katom->will_fail_event_code &&
+ kbase_debug_job_fault_process(katom, katom->event_code))
+ return;
+#endif
+
+ WARN_ON(work_pending(&katom->work));
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, kbase_jd_done_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_done);
+
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
+
+ /* This should only be done from a context that is not scheduled */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ WARN_ON(work_pending(&katom->work));
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, jd_cancel_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+
+void kbase_jd_zap_context(struct kbase_context *kctx)
+{
+ struct kbase_jd_atom *katom;
+ struct list_head *entry, *tmp;
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kbdev = kctx->kbdev;
+
+ KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
+
+ kbase_js_zap_context(kctx);
+
+ mutex_lock(&kctx->jctx.lock);
+
+ /*
+ * While holding the struct kbase_jd_context lock, clean up jobs that are
+ * known to kbase but are queued outside the job scheduler.
+ */
+
+ del_timer_sync(&kctx->soft_job_timeout);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ katom = list_entry(entry, struct kbase_jd_atom, queue);
+ kbase_cancel_soft_job(katom);
+ }
+
+
+#ifdef CONFIG_KDS
+
+ /* For each job waiting on a kds resource, cancel the wait and force the
+ * job to complete early. This is done so that we don't leave jobs
+ * outstanding, waiting on kds resources that may never be released once
+ * contexts are zapped, which would result in a hang.
+ *
+ * Note that we can safely iterate over the list because the struct
+ * kbase_jd_context lock is held; this prevents items from being removed
+ * while kbase_cancel_kds_wait_job calls job_done_nolock.
+ */
+
+ list_for_each(entry, &kctx->waiting_kds_resource) {
+ katom = list_entry(entry, struct kbase_jd_atom, node);
+
+ kbase_cancel_kds_wait_job(katom);
+ }
+#endif
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_cancel_all_atoms(kctx);
+#endif
+
+ mutex_unlock(&kctx->jctx.lock);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+ */
+ flush_workqueue(kctx->dma_fence.wq);
+#endif
+
+ kbase_jm_wait_for_zero_jobs(kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
+
+int kbase_jd_init(struct kbase_context *kctx)
+{
+ int i;
+ int mali_err = 0;
+#ifdef CONFIG_KDS
+ int err;
+#endif /* CONFIG_KDS */
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (NULL == kctx->jctx.job_done_wq) {
+ mali_err = -ENOMEM;
+ goto out1;
+ }
+
+ for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+ init_waitqueue_head(&kctx->jctx.atoms[i].completed);
+
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
+
+ /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
+ kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
+ kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+ kctx->jctx.atoms[i].dma_fence.context =
+ dma_fence_context_alloc(1);
+ atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
+#endif
+ }
+
+ mutex_init(&kctx->jctx.lock);
+
+ init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
+
+ spin_lock_init(&kctx->jctx.tb_lock);
+
+#ifdef CONFIG_KDS
+ err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
+ if (0 != err) {
+ mali_err = -EINVAL;
+ goto out2;
+ }
+#endif /* CONFIG_KDS */
+
+ kctx->jctx.job_nr = 0;
+ INIT_LIST_HEAD(&kctx->completed_jobs);
+ atomic_set(&kctx->work_count, 0);
+
+ return 0;
+
+#ifdef CONFIG_KDS
+ out2:
+ destroy_workqueue(kctx->jctx.job_done_wq);
+#endif /* CONFIG_KDS */
+ out1:
+ return mali_err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_init);
+
+void kbase_jd_exit(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+#ifdef CONFIG_KDS
+ kds_callback_term(&kctx->jctx.kds_cb);
+#endif /* CONFIG_KDS */
+ /* Work queue is emptied by this */
+ destroy_workqueue(kctx->jctx.job_done_wq);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_exit);
diff --git a/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.c
new file mode 100644
index 000000000000..c8b37c4e3291
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.c
@@ -0,0 +1,235 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <mali_kbase.h>
+#include <mali_kbase_jd_debugfs.h>
+#include <mali_kbase_dma_fence.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+
+struct kbase_jd_debugfs_depinfo {
+ u8 id;
+ char type;
+};
+
+static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
+ struct seq_file *sfile)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ struct kbase_sync_fence_info info;
+ int res;
+
+ switch (atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ res = kbase_sync_fence_out_info_get(atom, &info);
+ if (0 == res) {
+ seq_printf(sfile, "Sa([%p]%d) ",
+ info.fence, info.status);
+ break;
+ }
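+ /* note: falls through to the "wait" case when the fence info
+ * could not be fetched */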
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ res = kbase_sync_fence_in_info_get(atom, &info);
+ if (0 == res) {
+ seq_printf(sfile, "Wa([%p]%d) ",
+ info.fence, info.status);
+ break;
+ }
+ default:
+ break;
+ }
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ struct kbase_fence_cb *cb;
+
+ if (atom->dma_fence.fence) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence = atom->dma_fence.fence;
+#else
+ struct dma_fence *fence = atom->dma_fence.fence;
+#endif
+
+ seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+ "Sd(%u#%u: %s) ",
+#else
+ "Sd(%llu#%u: %s) ",
+#endif
+ fence->context,
+ fence->seqno,
+ dma_fence_is_signaled(fence) ?
+ "signaled" : "active");
+ }
+
+ list_for_each_entry(cb, &atom->dma_fence.callbacks,
+ node) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence = cb->fence;
+#else
+ struct dma_fence *fence = cb->fence;
+#endif
+
+ seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+ "Wd(%u#%u: %s) ",
+#else
+ "Wd(%llu#%u: %s) ",
+#endif
+ fence->context,
+ fence->seqno,
+ dma_fence_is_signaled(fence) ?
+ "signaled" : "active");
+ }
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+}
+
+static void kbasep_jd_debugfs_atom_deps(
+ struct kbase_jd_debugfs_depinfo *deps,
+ struct kbase_jd_atom *atom)
+{
+ struct kbase_context *kctx = atom->kctx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ deps[i].id = (unsigned)(atom->dep[i].atom ?
+ kbase_jd_atom_id(kctx, atom->dep[i].atom) : 0);
+
+ switch (atom->dep[i].dep_type) {
+ case BASE_JD_DEP_TYPE_INVALID:
+ deps[i].type = ' ';
+ break;
+ case BASE_JD_DEP_TYPE_DATA:
+ deps[i].type = 'D';
+ break;
+ case BASE_JD_DEP_TYPE_ORDER:
+ deps[i].type = '>';
+ break;
+ default:
+ deps[i].type = '?';
+ break;
+ }
+ }
+}
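+
+/* In the atoms file each pre-dependency is shown as a single character:
+ * 'D' for a data dependency, '>' for an order dependency, ' ' for no
+ * dependency and '?' for an unrecognised type, followed by the atom id. */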
+/**
+ * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file.
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to get the contents of the JD atoms debugfs file.
+ * This is a report of all atoms managed by kbase_jd_context.atoms
+ *
+ * Return: 0 if the data was successfully printed to the debugfs entry
+ * file, failure otherwise
+ */
+static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_context *kctx = sfile->private;
+ struct kbase_jd_atom *atoms;
+ unsigned long irq_flags;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* Print version */
+ seq_printf(sfile, "v%u\n", MALI_JD_DEBUGFS_VERSION);
+
+ /* Print U/K API version */
+ seq_printf(sfile, "ukv%u.%u\n", BASE_UK_VERSION_MAJOR,
+ BASE_UK_VERSION_MINOR);
+
+ /* Print table heading */
+ seq_puts(sfile, " ID, Core req, St, CR, Predeps, Start time, Additional info...\n");
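+ /* Each atom is printed as one row under this heading: decimal ID, core
+ * requirement flags in hex, status, coreref state, the two
+ * pre-dependencies (type character plus atom ID), the time since the
+ * atom started in ns, then any fence information. */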
+
+ atoms = kctx->jctx.atoms;
+ /* General atom states */
+ mutex_lock(&kctx->jctx.lock);
+ /* JS-related states */
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+ for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
+ struct kbase_jd_atom *atom = &atoms[i];
+ s64 start_timestamp = 0;
+ struct kbase_jd_debugfs_depinfo deps[2];
+
+ if (atom->status == KBASE_JD_ATOM_STATE_UNUSED)
+ continue;
+
+ /* start_timestamp is cleared as soon as the atom leaves the UNUSED state
+ * and is set just before the job is submitted to the h/w; a non-zero
+ * value means it is valid */
+ if (ktime_to_ns(atom->start_timestamp))
+ start_timestamp = ktime_to_ns(
+ ktime_sub(ktime_get(), atom->start_timestamp));
+
+ kbasep_jd_debugfs_atom_deps(deps, atom);
+
+ seq_printf(sfile,
+ "%3u, %8x, %2u, %2u, %c%3u %c%3u, %20lld, ",
+ i, atom->core_req, atom->status,
+ atom->coreref_state,
+ deps[0].type, deps[0].id,
+ deps[1].type, deps[1].id,
+ start_timestamp);
+
+
+ kbase_jd_debugfs_fence_info(atom, sfile);
+
+ seq_puts(sfile, "\n");
+ }
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&kctx->jctx.lock);
+
+ return 0;
+}
+
+
+/**
+ * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * Return: file descriptor
+ */
+static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private);
+}
+
+static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
+ .open = kbasep_jd_debugfs_atoms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* Expose all atoms */
+ debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx,
+ &kbasep_jd_debugfs_atoms_fops);
+
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.h
new file mode 100644
index 000000000000..0935f1db7296
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_jd_debugfs.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_jd_debugfs.h
+ * Header file for job dispatcher-related entries in debugfs
+ */
+
+#ifndef _KBASE_JD_DEBUGFS_H
+#define _KBASE_JD_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+
+#define MALI_JD_DEBUGFS_VERSION 2
+
+/**
+ * kbasep_jd_debugfs_ctx_init() - Add debugfs entries for JD system
+ *
+ * @kctx: Pointer to kbase_context
+ */
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx);
+
+#endif /*_KBASE_JD_DEBUGFS_H*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase_jm.c b/drivers/gpu/arm_gpu/mali_kbase_jm.c
new file mode 100644
index 000000000000..0c5c6a6f78cb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_jm.c
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_hwaccess_jm.h"
+#include "mali_kbase_jm.h"
+
+/**
+ * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on slot
+ * @js on the active context.
+ * @kbdev: Device pointer
+ * @js: Job slot to run on
+ * @nr_jobs_to_submit: Number of jobs to attempt to submit
+ *
+ * Return: true if slot can still be submitted on, false if slot is now full.
+ */
+static bool kbase_jm_next_job(struct kbase_device *kbdev, int js,
+ int nr_jobs_to_submit)
+{
+ struct kbase_context *kctx;
+ int i;
+
+ kctx = kbdev->hwaccess.active_kctx;
+
+ if (!kctx)
+ return true;
+
+ for (i = 0; i < nr_jobs_to_submit; i++) {
+ struct kbase_jd_atom *katom = kbase_js_pull(kctx, js);
+
+ if (!katom)
+ return true; /* Context has no jobs on this slot */
+
+ kbase_backend_run_atom(kbdev, katom);
+ }
+
+ return false; /* Slot ringbuffer should now be full */
+}
+
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+ u32 ret_mask = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
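+ /* Walk the slot mask lowest bit first: ffs() gives the 1-based index of
+ * the lowest set bit, so js is that slot number. Slots that still have
+ * room after submission are reported back in ret_mask. */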
+ while (js_mask) {
+ int js = ffs(js_mask) - 1;
+ int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js);
+
+ if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit))
+ ret_mask |= (1 << js);
+
+ js_mask &= ~(1 << js);
+ }
+
+ return ret_mask;
+}
+
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!down_trylock(&js_devdata->schedule_sem)) {
+ kbase_jm_kick(kbdev, js_mask);
+ up(&js_devdata->schedule_sem);
+ }
+}
+
+void kbase_jm_try_kick_all(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!down_trylock(&js_devdata->schedule_sem)) {
+ kbase_jm_kick_all(kbdev);
+ up(&js_devdata->schedule_sem);
+ }
+}
+
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->hwaccess.active_kctx == kctx)
+ kbdev->hwaccess.active_kctx = NULL;
+}
+
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (katom->event_code != BASE_JD_EVENT_STOPPED &&
+ katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) {
+ return kbase_js_complete_atom(katom, NULL);
+ } else {
+ kbase_js_unpull(katom->kctx, katom);
+ return NULL;
+ }
+}
+
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, ktime_t *end_timestamp)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ return kbase_js_complete_atom(katom, end_timestamp);
+}
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_jm.h b/drivers/gpu/arm_gpu/mali_kbase_jm.h
new file mode 100644
index 000000000000..a74ee24c8058
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_jm.h
@@ -0,0 +1,110 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Job manager common APIs
+ */
+
+#ifndef _KBASE_JM_H_
+#define _KBASE_JM_H_
+
+/**
+ * kbase_jm_kick() - Indicate that there are jobs ready to run.
+ * @kbdev: Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from.
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job
+ * slots.
+ * @kbdev: Device pointer
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev)
+{
+ return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+/**
+ * kbase_jm_try_kick - Attempt to call kbase_jm_kick
+ * @kbdev: Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all
+ * @kbdev: Device pointer
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick_all() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick_all(struct kbase_device *kbdev);
+
+/**
+ * kbase_jm_idle_ctx() - Mark a context as idle.
+ * @kbdev: Device pointer
+ * @kctx: Context to mark as idle
+ *
+ * No more atoms will be pulled from this context until it is marked as active
+ * by kbase_js_use_ctx().
+ *
+ * The context should have no atoms currently pulled from it
+ * (kctx->atoms_pulled == 0).
+ *
+ * Caller must hold the hwaccess_lock
+ */
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has
+ * been soft-stopped or will fail due to a
+ * dependency
+ * @kbdev: Device pointer
+ * @katom: Atom that has been stopped or will be failed
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_jm_complete() - Complete an atom
+ * @kbdev: Device pointer
+ * @katom: Atom that has completed
+ * @end_timestamp: Timestamp of atom completion
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, ktime_t *end_timestamp);
+
+#endif /* _KBASE_JM_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_js.c b/drivers/gpu/arm_gpu/mali_kbase_js.c
new file mode 100644
index 000000000000..2be32d6da788
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_js.c
@@ -0,0 +1,2819 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Job Scheduler Implementation
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_js.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_ctx_sched.h>
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_jm.h"
+#include "mali_kbase_hwaccess_jm.h"
+
+/*
+ * Private types
+ */
+
+/* Bitpattern indicating the result of releasing a context */
+enum {
+ /* The context was descheduled - caller should try scheduling in a new
+ * one to keep the runpool full */
+ KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
+ /* Ctx attributes were changed - caller should try scheduling all
+ * contexts */
+ KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
+};
+
+typedef u32 kbasep_js_release_result;
+
+const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
+ KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */
+ KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
+ KBASE_JS_ATOM_SCHED_PRIO_LOW /* BASE_JD_PRIO_LOW */
+};
+
+const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
+ BASE_JD_PRIO_HIGH, /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
+ BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
+ BASE_JD_PRIO_LOW /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
+};
+
+
+/*
+ * Private function prototypes
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+ struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state);
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+ kbasep_js_ctx_job_cb callback);
+
+/* Helper for trace subcodes */
+#if KBASE_TRACE_ENABLE
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ return atomic_read(&kctx->refcount);
+}
+#else /* KBASE_TRACE_ENABLE */
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+/*
+ * Private functions
+ */
+
+/**
+ * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
+ * @features: JSn_FEATURE register value
+ *
+ * Given a JSn_FEATURE register value returns the core requirements that match
+ *
+ * Return: Core requirement bit mask
+ */
+static base_jd_core_req core_reqs_from_jsn_features(u16 features)
+{
+ base_jd_core_req core_req = 0u;
+
+ if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
+ core_req |= BASE_JD_REQ_V;
+
+ if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
+ core_req |= BASE_JD_REQ_CF;
+
+ if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
+ core_req |= BASE_JD_REQ_CS;
+
+ if ((features & JS_FEATURE_TILER_JOB) != 0)
+ core_req |= BASE_JD_REQ_T;
+
+ if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
+ core_req |= BASE_JD_REQ_FS;
+
+ return core_req;
+}
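+
+/* For example, a slot whose JSn_FEATURES value advertises only the fragment
+ * and tiler job features maps to (BASE_JD_REQ_FS | BASE_JD_REQ_T). */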
+
+static void kbase_js_sync_timers(struct kbase_device *kbdev)
+{
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+}
+
+/* Hold the mmu_hw_mutex and hwaccess_lock for this */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ bool result = false;
+ int as_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ as_nr = kctx->as_nr;
+ if (atomic_read(&kctx->refcount) > 0) {
+ KBASE_DEBUG_ASSERT(as_nr >= 0);
+
+ kbase_ctx_sched_retain_ctx_refcount(kctx);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
+ NULL, 0u, atomic_read(&kctx->refcount));
+ result = true;
+ }
+
+ return result;
+}
+
+/**
+ * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ * @prio: Priority to check.
+ *
+ * Return true if there are no atoms to pull. There may be running atoms in the
+ * ring buffer even if there are no atoms to pull. It is also possible for the
+ * ring buffer to be full (with running atoms) when this function returns
+ * true.
+ *
+ * Return: true if there are no atoms to pull, false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
+{
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ return RB_EMPTY_ROOT(&rb->runnable_tree);
+}
+
+/**
+ * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
+ * pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if the ring buffers for all priorities have no pullable atoms,
+ * false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
+{
+ int prio;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
+ * @kctx: Pointer to kbase context with the queue.
+ * @js: Job slot id to iterate.
+ * @prio: Priority id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over a queue and invoke @callback for each entry in the queue, and
+ * remove the entry from the queue.
+ *
+ * If entries are added to the queue while this is running, those entries may
+ * or may not be covered. To ensure that all entries in the buffer have been
+ * enumerated when this function returns, jsctx->lock must be held when calling
+ * this function.
+ *
+ * The HW access lock must always be held when calling this function.
+ */
+static void
+jsctx_queue_foreach_prio(struct kbase_context *kctx, int js, int prio,
+ kbasep_js_ctx_job_cb callback)
+{
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
+ struct rb_node *node = rb_first(&queue->runnable_tree);
+ struct kbase_jd_atom *entry = rb_entry(node,
+ struct kbase_jd_atom, runnable_tree_node);
+
+ rb_erase(node, &queue->runnable_tree);
+ callback(kctx->kbdev, entry);
+ }
+
+ while (!list_empty(&queue->x_dep_head)) {
+ struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
+ struct kbase_jd_atom, queue);
+
+ list_del(queue->x_dep_head.next);
+
+ callback(kctx->kbdev, entry);
+ }
+}
+
+/**
+ * jsctx_queue_foreach(): - Execute callback for each entry in every queue
+ * @kctx: Pointer to kbase context with queue.
+ * @js: Job slot id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over all the different priorities, and for each call
+ * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback
+ * for each entry, and remove the entry from the queue.
+ */
+static inline void
+jsctx_queue_foreach(struct kbase_context *kctx, int js,
+ kbasep_js_ctx_job_cb callback)
+{
+ int prio;
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
+ jsctx_queue_foreach_prio(kctx, js, prio, callback);
+}
+
+/**
+ * jsctx_rb_peek_prio(): - Check buffer and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ * @prio: Priority id to check.
+ *
+ * Check the ring buffer for the specified @js and @prio and return a pointer to
+ * the next atom, unless the ring buffer is empty.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
+{
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+ struct rb_node *node;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ node = rb_first(&rb->runnable_tree);
+ if (!node)
+ return NULL;
+
+ return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
+}
+
+/**
+ * jsctx_rb_peek(): - Check all priority buffers and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ *
+ * Check the ring buffers for all priorities, starting from
+ * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a
+ * pointer to the next atom, unless all the priorities' ring buffers are empty.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek(struct kbase_context *kctx, int js)
+{
+ int prio;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ struct kbase_jd_atom *katom;
+
+ katom = jsctx_rb_peek_prio(kctx, js, prio);
+ if (katom)
+ return katom;
+ }
+
+ return NULL;
+}
+
+/**
+ * jsctx_rb_pull(): - Mark atom in list as running
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to pull.
+ *
+ * Mark an atom previously obtained from jsctx_rb_peek() as running.
+ *
+ * @katom must currently be at the head of the ring buffer.
+ */
+static inline void
+jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ /* Atoms must be pulled in the correct order. */
+ WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
+
+ rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
+}
+
+#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0)
+
+static void
+jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+ struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
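+ /* Standard rbtree insertion: descend from the root, remembering the
+ * parent and the link to rewrite, ordering atoms by wrap-safe age so
+ * that older atoms are pulled first. */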
+ while (*new) {
+ struct kbase_jd_atom *entry = container_of(*new,
+ struct kbase_jd_atom, runnable_tree_node);
+
+ parent = *new;
+ if (LESS_THAN_WRAP(katom->age, entry->age))
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&katom->runnable_tree_node, parent, new);
+ rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
+}
+
+/**
+ * jsctx_rb_unpull(): - Undo marking of atom in list as running
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to unpull.
+ *
+ * Undo jsctx_rb_pull() and put @katom back in the queue.
+ *
+ * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
+ * pulled.
+ */
+static inline void
+jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ jsctx_tree_add(kctx, katom);
+}
+
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx,
+ int js,
+ bool is_scheduled);
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js);
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js);
+
+/*
+ * Functions private to KBase ('Protected' functions)
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev)
+{
+ struct kbasep_js_device_data *jsdd;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ jsdd = &kbdev->js_data;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* Soft-stop will be disabled on a single context by default unless
+ * softstop_always is set */
+ jsdd->softstop_always = false;
+#endif /* CONFIG_MALI_DEBUG */
+ jsdd->nr_all_contexts_running = 0;
+ jsdd->nr_user_contexts_running = 0;
+ jsdd->nr_contexts_pullable = 0;
+ atomic_set(&jsdd->nr_contexts_runnable, 0);
+ /* No ctx allowed to submit */
+ jsdd->runpool_irq.submit_allowed = 0u;
+ memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
+ sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
+ memset(jsdd->runpool_irq.slot_affinities, 0,
+ sizeof(jsdd->runpool_irq.slot_affinities));
+ memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
+ sizeof(jsdd->runpool_irq.slot_affinity_refcount));
+ INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);
+
+ /* Config attributes */
+ jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
+ jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
+ jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
+ else
+ jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
+ jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
+ jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
+ else
+ jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
+ jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
+ jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
+ jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
+ atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
+
+ dev_dbg(kbdev->dev, "JS Config Attribs: ");
+ dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
+ jsdd->scheduling_period_ns);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
+ jsdd->soft_stop_ticks);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
+ jsdd->soft_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
+ jsdd->hard_stop_ticks_ss);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
+ jsdd->hard_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
+ jsdd->hard_stop_ticks_dumping);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
+ jsdd->gpu_reset_ticks_ss);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
+ jsdd->gpu_reset_ticks_cl);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
+ jsdd->gpu_reset_ticks_dumping);
+ dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
+ jsdd->ctx_timeslice_ns);
+ dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
+ atomic_read(&jsdd->soft_job_timeout_ms));
+
+ if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
+ jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
+ jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
+ jsdd->hard_stop_ticks_dumping <
+ jsdd->gpu_reset_ticks_dumping)) {
+ dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
+ return -EINVAL;
+ }
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+ dev_dbg(kbdev->dev, "Job Scheduling Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
+ jsdd->soft_stop_ticks,
+ jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ dev_dbg(kbdev->dev, "Job Scheduling Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
+ jsdd->hard_stop_ticks_ss,
+ jsdd->hard_stop_ticks_dumping,
+ jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ dev_dbg(kbdev->dev, "Note: The JS tick timer (if coded) will still be run, but do nothing.");
+#endif
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
+ jsdd->js_reqs[i] = core_reqs_from_jsn_features(
+ kbdev->gpu_props.props.raw_props.js_features[i]);
+
+ /* On error we could continue, provided none of the resources
+  * initialised below rely on the ones above */
+
+ mutex_init(&jsdd->runpool_mutex);
+ mutex_init(&jsdd->queue_mutex);
+ spin_lock_init(&kbdev->hwaccess_lock);
+ sema_init(&jsdd->schedule_sem, 1);
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
+ INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]);
+ INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]);
+ }
+
+ return 0;
+}
+
+void kbasep_js_devdata_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbasep_js_devdata_term(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ /* The caller must de-register all contexts before calling this */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
+ KBASE_DEBUG_ASSERT(memcmp(
+ js_devdata->runpool_irq.ctx_attr_ref_count,
+ zero_ctx_attr_ref_count,
+ sizeof(zero_ctx_attr_ref_count)) == 0);
+ CSTD_UNUSED(zero_ctx_attr_ref_count);
+}
+
+int kbasep_js_kctx_init(struct kbase_context * const kctx)
+{
+ struct kbase_device *kbdev;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ int i, j;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+ INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ js_kctx_info->ctx.nr_jobs = 0;
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+ kbase_ctx_flag_clear(kctx, KCTX_DYING);
+ memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
+ sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
+
+ /* Initially, the context is disabled from submission until the create
+ * flags are set */
+ kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED);
+
+ /* On error we could continue, provided none of the resources
+  * initialised below rely on the ones above */
+ mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+
+ init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
+
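+ /* One jsctx_queue (x_dep list plus runnable rb-tree) exists per
+  * scheduling priority per job slot. */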
+ for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+ for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
+ INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
+ kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
+ }
+ }
+
+ return 0;
+}
+
+void kbasep_js_kctx_term(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ int js;
+ bool update_ctx_count = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* The caller must de-register all jobs before calling this */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
+
+ mutex_lock(&kbdev->js_data.queue_mutex);
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
+ WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ update_ctx_count = true;
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ }
+
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kbdev->js_data.queue_mutex);
+
+ if (update_ctx_count) {
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+ }
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_nolock - Add context to the tail of the
+ *                                         per-slot pullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_pullable[js]);
+
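+ /* slots_pullable is a per-context bitmask of job slots that have
+  * pullable work; a 0 -> non-zero transition is what makes the whole
+  * context count as pullable/runnable. */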
+ if (!kctx->slots_pullable) {
+ kbdev->js_data.nr_contexts_pullable++;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable |= (1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
+ * kbase_js_ctx_list_add_pullable_head()
+ * where the caller must hold
+ * hwaccess_lock
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head_nolock(
+ struct kbase_device *kbdev, struct kbase_context *kctx, int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_pullable[js]);
+
+ if (!kctx->slots_pullable) {
+ kbdev->js_data.nr_contexts_pullable++;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable |= (1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
+ * per-slot pullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * If the context is on either the pullable or unpullable queues, then it is
+ * removed before being added to the head.
+ *
+ * This function should be used when a context has been scheduled, but no jobs
+ * can currently be pulled from it.
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
+ * per-slot unpullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * The context must already be on the per-slot pullable queue. It will be
+ * removed from the pullable queue before being added to the unpullable queue.
+ *
+ * This function should be used when a context has been pulled from, and there
+ * are no jobs remaining on the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_unpullable[js]);
+
+ if (kctx->slots_pullable == (1 << js)) {
+ kbdev->js_data.nr_contexts_pullable--;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable &= ~(1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
+ * or unpullable context queues
+ * @kbdev: Device pointer
+ * @kctx: Context to remove from queue
+ * @js: Job slot to use
+ *
+ * The context must already be on one of the queues.
+ *
+ * This function should be used when a context has no jobs on the GPU, and no
+ * jobs remaining for the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
+
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ if (kctx->slots_pullable == (1 << js)) {
+ kbdev->js_data.nr_contexts_pullable--;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable &= ~(1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
+ * where the caller must hold
+ * hwaccess_lock
+ * @kbdev: Device pointer
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: Context to use for specified slot.
+ * NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
+ struct kbase_device *kbdev,
+ int js)
+{
+ struct kbase_context *kctx;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
+ return NULL;
+
+ kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next,
+ struct kbase_context,
+ jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ return kctx;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
+ * queue.
+ * @kbdev: Device pointer
+ * @js: Job slot to use
+ *
+ * Return: Context to use for specified slot.
+ * NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head(
+ struct kbase_device *kbdev, int js)
+{
+ struct kbase_context *kctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return kctx;
+}
+
+/**
+ * kbase_js_ctx_pullable - Return if a context can be pulled from on the
+ * specified slot
+ * @kctx: Context pointer
+ * @js: Job slot to use
+ * @is_scheduled: true if the context is currently scheduled
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if context can be pulled from on specified slot
+ * false otherwise
+ */
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
+ bool is_scheduled)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_jd_atom *katom;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ js_devdata = &kctx->kbdev->js_data;
+
+ if (is_scheduled) {
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ return false;
+ }
+ katom = jsctx_rb_peek(kctx, js);
+ if (!katom)
+ return false; /* No pullable atoms */
+ if (kctx->blocked_js[js][katom->sched_priority])
+ return false;
+ if (atomic_read(&katom->blocked))
+ return false; /* next atom blocked */
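+ /* For a cross-slot dependency the atom is only pullable once its
+  * pre-dep has entered the slot ringbuffer and is not doomed to fail;
+  * a fail-blocker dependency additionally requires the slot to be
+  * empty. */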
+ if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+ if (katom->x_pre_dep->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+ katom->x_pre_dep->will_fail_event_code)
+ return false;
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+ kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
+ return false;
+ }
+
+ return true;
+}
+
+static bool kbase_js_dep_validate(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ bool ret = true;
+ bool has_dep = false, has_x_dep = false;
+ int js = kbase_js_get_slot(kbdev, katom);
+ int prio = katom->sched_priority;
+ int i;
+
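+ /* An atom has at most two dependencies; check that they can be
+  * expressed as the single same-slot and/or single cross-slot
+  * dependency that the ringbuffers can represent. */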
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+ if (dep_atom) {
+ int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+ int dep_prio = dep_atom->sched_priority;
+
+ /* Dependent atom must already have been submitted */
+ if (!(dep_atom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
+ ret = false;
+ break;
+ }
+
+ /* Dependencies with different priorities can't
+  * be represented in the ringbuffer */
+ if (prio != dep_prio) {
+ ret = false;
+ break;
+ }
+
+ if (js == dep_js) {
+ /* Only one same-slot dependency can be
+ * represented in the ringbuffer */
+ if (has_dep) {
+ ret = false;
+ break;
+ }
+ /* Each dependee atom can only have one
+ * same-slot dependency */
+ if (dep_atom->post_dep) {
+ ret = false;
+ break;
+ }
+ has_dep = true;
+ } else {
+ /* Only one cross-slot dependency can be
+ * represented in the ringbuffer */
+ if (has_x_dep) {
+ ret = false;
+ break;
+ }
+ /* Each dependee atom can only have one
+ * cross-slot dependency */
+ if (dep_atom->x_post_dep) {
+ ret = false;
+ break;
+ }
+ /* The dependee atom can not already be in the
+ * HW access ringbuffer */
+ if (dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ ret = false;
+ break;
+ }
+ /* The dependee atom can not already have
+ * completed */
+ if (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_IN_JS) {
+ ret = false;
+ break;
+ }
+ /* Cross-slot dependencies must not violate
+ * PRLAM-8987 affinity restrictions */
+ if (kbase_hw_has_issue(kbdev,
+ BASE_HW_ISSUE_8987) &&
+ (js == 2 || dep_js == 2)) {
+ ret = false;
+ break;
+ }
+ has_x_dep = true;
+ }
+
+ /* Dependency can be represented in ringbuffers */
+ }
+ }
+
+ /* If dependencies can be represented by ringbuffer then clear them from
+ * atom structure */
+ if (ret) {
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+ if (dep_atom) {
+ int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+
+ if ((js != dep_js) &&
+ (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_COMPLETED)
+ && (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_HW_COMPLETED)
+ && (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_UNUSED)) {
+
+ katom->atom_flags |=
+ KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ katom->x_pre_dep = dep_atom;
+ dep_atom->x_post_dep = katom;
+ if (kbase_jd_katom_dep_type(
+ &katom->dep[i]) ==
+ BASE_JD_DEP_TYPE_DATA)
+ katom->atom_flags |=
+ KBASE_KATOM_FLAG_FAIL_BLOCKER;
+ }
+ if ((kbase_jd_katom_dep_type(&katom->dep[i])
+ == BASE_JD_DEP_TYPE_DATA) &&
+ (js == dep_js)) {
+ katom->pre_dep = dep_atom;
+ dep_atom->post_dep = katom;
+ }
+
+ list_del(&katom->dep_item[i]);
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+ }
+ }
+ }
+
+ return ret;
+}
+
+bool kbasep_js_add_job(struct kbase_context *kctx,
+ struct kbase_jd_atom *atom)
+{
+ unsigned long flags;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+
+ bool enqueue_required = false;
+ bool timer_sync = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /*
+ * Begin Runpool transaction
+ */
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* Refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
+ ++(js_kctx_info->ctx.nr_jobs);
+
+ /* Setup any scheduling information */
+ kbasep_js_clear_job_retry_submit(atom);
+
+ /* Lock for state available during IRQ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!kbase_js_dep_validate(kctx, atom)) {
+ /* Dependencies could not be represented */
+ --(js_kctx_info->ctx.nr_jobs);
+
+ /* Setting atom status back to queued as it still has unresolved
+ * dependencies */
+ atom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ goto out_unlock;
+ }
+
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_READY);
+ KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));
+
+ enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
+ kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ /* Context Attribute Refcounting */
+ kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
+
+ if (enqueue_required) {
+ if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
+ timer_sync = kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, atom->slot_nr);
+ else
+ timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
+ kbdev, kctx, atom->slot_nr);
+ }
+ /* If this context is active and the atom is the first on its slot,
+ * kick the job manager to attempt to fast-start the atom */
+ if (enqueue_required && kctx == kbdev->hwaccess.active_kctx)
+ kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ /* End runpool transaction */
+
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* A job got added while/after kbase_job_zap_context()
+ * was called on a non-scheduled context (e.g. KDS
+ * dependency resolved). Kill that job by killing the
+ * context. */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
+ false);
+ } else if (js_kctx_info->ctx.nr_jobs == 1) {
+ /* Handle Refcount going from 0 to 1: schedule the
+ * context on the Queue */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
+
+ /* Queue was updated - caller must try to
+ * schedule the head context */
+ WARN_ON(!enqueue_required);
+ }
+ }
+out_unlock:
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ return enqueue_required;
+}
+
+void kbasep_js_remove_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_jd_atom *atom)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
+ kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ /* De-refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
+ --(js_kctx_info->ctx.nr_jobs);
+}
+
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ unsigned long flags;
+ struct kbasep_js_atom_retained_state katom_retained_state;
+ struct kbasep_js_device_data *js_devdata;
+ bool attr_state_changed;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+ kbasep_js_remove_job(kbdev, kctx, katom);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* The atom has 'finished' (will not be re-run), so no need to call
+ * kbasep_js_has_atom_finished().
+ *
+ * This is because it returns false for soft-stopped atoms, but we
+ * want to override that, because we're cancelling an atom regardless of
+ * whether it was soft-stopped or not */
+ attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
+ &katom_retained_state);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return attr_state_changed;
+}
+
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ bool result;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ return result;
+}
+
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
+ int as_nr)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_context *found_kctx = NULL;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ found_kctx = kbdev->as_to_kctx[as_nr];
+
+ if (found_kctx != NULL)
+ kbase_ctx_sched_retain_ctx_refcount(found_kctx);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return found_kctx;
+}
+
+/**
+ * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs
+ *                                                 after releasing a context
+ *                                                 and/or atom
+ *
+ * @kbdev: The kbase_device to operate on
+ * @kctx: The kbase_context to operate on
+ * @katom_retained_state: Retained state from the atom
+ * @runpool_ctx_attr_change: True if the runpool context attributes have changed
+ *
+ * This collates a set of actions that must happen whilst hwaccess_lock is held.
+ *
+ * This includes running more jobs when:
+ * - The previously released kctx caused a ctx attribute change,
+ * - The released atom caused a ctx attribute change,
+ * - Slots were previously blocked due to affinity restrictions,
+ * - Submission during IRQ handling failed.
+ *
+ * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
+ * changed. The caller should try scheduling all contexts
+ */
+static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state,
+ bool runpool_ctx_attr_change)
+{
+ struct kbasep_js_device_data *js_devdata;
+ kbasep_js_release_result result = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (js_devdata->nr_user_contexts_running != 0) {
+ bool retry_submit = false;
+ int retry_jobslot = 0;
+
+ if (katom_retained_state)
+ retry_submit = kbasep_js_get_atom_retry_submit_slot(
+ katom_retained_state, &retry_jobslot);
+
+ if (runpool_ctx_attr_change || retry_submit) {
+ /* A change in runpool ctx attributes might mean we can
+ * run more jobs than before */
+ result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
+ kctx, NULL, 0u, retry_jobslot);
+ }
+ }
+ return result;
+}
+
+/*
+ * Internal function to release the reference on a ctx and an atom's "retained
+ * state", only taking the runpool and as transaction mutexes
+ *
+ * This also starts more jobs running in the case of a ctx-attribute state
+ * change
+ *
+ * This does none of the followup actions for scheduling:
+ * - It does not schedule in a new context
+ * - It does not requeue or handle dying contexts
+ *
+ * For those tasks, just call kbasep_js_runpool_release_ctx() instead
+ *
+ * Requires:
+ * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
+ * - Context has a non-zero refcount
+ * - Caller holds js_kctx_info->ctx.jsctx_mutex
+ * - Caller holds js_devdata->runpool_mutex
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ kbasep_js_release_result release_result = 0u;
+ bool runpool_ctx_attr_change = false;
+ int kctx_as_nr;
+ int new_ref_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ kctx_as_nr = kctx->as_nr;
+ KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+ /*
+ * Transaction begins on AS and runpool_irq
+ *
+ * Assert about our calling contract
+ */
+ mutex_lock(&kbdev->pm.lock);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
+ KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+ /* Update refcount */
+ kbase_ctx_sched_release_ctx(kctx);
+ new_ref_count = atomic_read(&kctx->refcount);
+
+ /* Release the atom if it finished (i.e. wasn't soft-stopped) */
+ if (kbasep_js_has_atom_finished(katom_retained_state))
+ runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
+ kbdev, kctx, katom_retained_state);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
+ new_ref_count);
+
+ if (new_ref_count == 2 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) &&
+ !kbase_pm_is_suspending(kbdev)) {
+ /* Context is kept scheduled into an address space even when
+ * there are no jobs, in this case we have to handle the
+ * situation where all jobs have been evicted from the GPU and
+ * submission is disabled.
+ *
+ * At this point we re-enable submission to allow further jobs
+ * to be executed
+ */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+ }
+
+ /* Make a set of checks to see if the context should be scheduled out.
+ * Note that there'll always be at least 1 reference to the context
+ * which was previously acquired by kbasep_js_schedule_ctx(). */
+ if (new_ref_count == 1 &&
+ (!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
+ kbdev->pm.suspending)) {
+ int num_slots = kbdev->gpu_props.num_job_slots;
+ int slot;
+
+ /* Last reference, and we've been told to remove this context
+ * from the Run Pool */
+ dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because refcount=%d, jobs=%d, allowed=%d",
+ kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
+ kbasep_js_is_submit_allowed(js_devdata, kctx));
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_mmu_as_released(kctx->as_nr);
+#endif
+ KBASE_TLSTREAM_TL_NRET_AS_CTX(&kbdev->as[kctx->as_nr], kctx);
+
+ kbase_backend_release_ctx_irq(kbdev, kctx);
+
+ if (kbdev->hwaccess.active_kctx == kctx)
+ kbdev->hwaccess.active_kctx = NULL;
+
+ /* Ctx Attribute handling
+ *
+ * Releasing atom attributes must either happen before this, or
+ * after the KCTX_SCHEDULED flag is changed, otherwise we
+ * double-decrement the attributes
+ */
+ runpool_ctx_attr_change |=
+ kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
+
+ /* Releasing the context and katom retained state can allow
+ * more jobs to run */
+ release_result |=
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
+ kctx, katom_retained_state,
+ runpool_ctx_attr_change);
+
+ /*
+ * Transaction ends on AS and runpool_irq:
+ *
+ * By this point, the AS-related data is now clear and ready
+ * for re-use.
+ *
+ * Since releases only occur once for each previous successful
+ * retain, and no more retains are allowed on this context, no
+ * other thread will be operating in this code whilst we are.
+ */
+
+ /* Recalculate pullable status for all slots */
+ for (slot = 0; slot < num_slots; slot++) {
+ if (kbase_js_ctx_pullable(kctx, slot, false))
+ kbase_js_ctx_list_add_pullable_nolock(kbdev,
+ kctx, slot);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_backend_release_ctx_noirq(kbdev, kctx);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Note: Don't reuse kctx_as_nr now */
+
+ /* Synchronize with any timers */
+ kbase_backend_ctx_count_changed(kbdev);
+
+ /* update book-keeping info */
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+ /* Signal any waiter that the context is not scheduled, so is
+ * safe for termination - once the jsctx_mutex is also dropped,
+ * and jobs have finished. */
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+ /* Queue an action to occur after we've dropped the lock */
+ release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
+ KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+ } else {
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
+ katom_retained_state, runpool_ctx_attr_change);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->pm.lock);
+ }
+
+ return release_result;
+}
+
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_atom_retained_state katom_retained_state;
+
+ /* Setup a dummy katom_retained_state */
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ &katom_retained_state);
+}
+
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx, bool has_pm_ref)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* This is called if and only if you've detached the context from
+ * the Runpool Queue, and not added it back to the Runpool
+ */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* Dying: don't requeue, but kill all jobs on the context. This
+ * happens asynchronously */
+ dev_dbg(kbdev->dev,
+ "JS: ** Killing Context %p on RunPool Remove **", kctx);
+ kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
+ }
+}
+
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(
+ struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ katom_retained_state);
+
+ /* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
+ kbase_js_sched_all(kbdev);
+}
+
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_atom_retained_state katom_retained_state;
+
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ &katom_retained_state);
+}
+
+/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
+ * kbase_js_sched_all() */
+static void kbasep_js_runpool_release_ctx_no_schedule(
+ struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+ struct kbasep_js_atom_retained_state katom_retained_state_struct;
+ struct kbasep_js_atom_retained_state *katom_retained_state =
+ &katom_retained_state_struct;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+ kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ katom_retained_state);
+
+ /* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* NOTE: could return release_result if the caller would like to know
+ * whether it should schedule a new context, but currently no callers do
+ */
+}
+
+void kbase_js_set_timeouts(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbase_backend_timeouts_changed(kbdev);
+}
+
+static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
+ bool kctx_suspended = false;
+ int as_nr;
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* Pick available address space for this context */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ as_nr = kbase_ctx_sched_retain_ctx(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ if (as_nr == KBASEP_AS_NR_INVALID) {
+ as_nr = kbase_backend_find_and_release_free_address_space(
+ kbdev, kctx);
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ /* Attempt to retain the context again, this should
+ * succeed */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ as_nr = kbase_ctx_sched_retain_ctx(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ WARN_ON(as_nr == KBASEP_AS_NR_INVALID);
+ }
+ }
+ if (as_nr == KBASEP_AS_NR_INVALID)
+ return false; /* No address spaces currently available */
+
+ /*
+ * Atomic transaction on the Context and Run Pool begins
+ */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Check to see if context is dying due to kbase_job_zap_context() */
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* Roll back the transaction so far and return */
+ kbase_ctx_sched_release_ctx(kctx);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return false;
+ }
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
+ 0u,
+ kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ kbase_ctx_flag_set(kctx, KCTX_SCHEDULED);
+
+ /* Assign context to previously chosen address space */
+ if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
+ /* Roll back the transaction so far and return */
+ kbase_ctx_sched_release_ctx(kctx);
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return false;
+ }
+
+ kbdev->hwaccess.active_kctx = kctx;
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
+#endif
+ KBASE_TLSTREAM_TL_RET_AS_CTX(&kbdev->as[kctx->as_nr], kctx);
+
+ /* Cause any future waiter-on-termination to wait until the context is
+ * descheduled */
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+ /* Re-check for suspending: a suspend could've occurred, and all the
+ * contexts could've been removed from the runpool before we took this
+ * lock. In this case, we don't want to allow this context to run jobs,
+ * we just want it out immediately.
+ *
+ * The DMB required to read the suspend flag was issued recently as part
+ * of the hwaccess_lock locking. If a suspend occurs *after* that lock
+ * was taken (i.e. this condition doesn't execute), then the
+ * kbasep_js_suspend() code will cleanup this context instead (by virtue
+ * of it being called strictly after the suspend flag is set, and will
+ * wait for this lock to drop) */
+ if (kbase_pm_is_suspending(kbdev)) {
+ /* Cause it to leave at some later point */
+ bool retained;
+
+ retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ KBASE_DEBUG_ASSERT(retained);
+
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+ kctx_suspended = true;
+ }
+
+ /* Transaction complete */
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ /* Synchronize with any timers */
+ kbase_backend_ctx_count_changed(kbdev);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Note: after this point, the context could potentially get scheduled
+ * out immediately */
+
+ if (kctx_suspended) {
+ /* Finishing forcing out the context due to a suspend. Use a
+ * variant of kbasep_js_runpool_release_ctx() that doesn't
+ * schedule a new context, to prevent a risk of recursion back
+ * into this function */
+ kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
+ return false;
+ }
+ return true;
+}
+
+static bool kbase_js_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+ kbase_backend_use_ctx_sched(kbdev, kctx)) {
+ /* Context already has ASID - mark as active */
+ kbdev->hwaccess.active_kctx = kctx;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ return true; /* Context already scheduled */
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return kbasep_js_schedule_ctx(kbdev, kctx);
+}
+
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+ bool is_scheduled;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* This must never be attempted whilst suspending - i.e. it should only
+ * happen in response to a syscall from a user-space thread */
+ BUG_ON(kbase_pm_is_suspending(kbdev));
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Mark the context as privileged */
+ kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
+
+ is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED);
+ if (!is_scheduled) {
+ /* Add the context to the pullable list */
+ if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
+ kbase_js_sync_timers(kbdev);
+
+ /* Fast-starting requires the jsctx_mutex to be dropped,
+ * because it works on multiple ctxs */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /* Try to schedule the context in */
+ kbase_js_sched_all(kbdev);
+
+ /* Wait for the context to be scheduled in */
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ } else {
+ /* Already scheduled in - We need to retain it to keep the
+ * corresponding address space */
+ kbasep_js_runpool_retain_ctx(kbdev, kctx);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ }
+}
+KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
+
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* We don't need to use the address space anymore */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Release the context - it will be scheduled out */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ kbase_js_sched_all(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
+
+void kbasep_js_suspend(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ int i;
+ u16 retained = 0u;
+ int nr_privileged_ctx = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Prevent all contexts from submitting */
+ js_devdata->runpool_irq.submit_allowed = 0;
+
+ /* Retain each of the contexts, so we can cause them to leave even if
+  * they had no refcount to begin with */
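+ /* 'retained' is built up as a bitmask (one bit per address space,
+  * shifted each iteration) recording which contexts were retained here,
+  * so that the matching releases can be issued further down. */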
+ for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
+ struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+ retained = retained << 1;
+
+ if (kctx) {
+ kbase_ctx_sched_retain_ctx_refcount(kctx);
+ retained |= 1u;
+ /* We can only cope with up to 1 privileged context -
+ * the instrumented context. It'll be suspended by
+ * disabling instrumentation */
+ if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+ ++nr_privileged_ctx;
+ WARN_ON(nr_privileged_ctx != 1);
+ }
+ }
+ }
+ CSTD_UNUSED(nr_privileged_ctx);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* De-ref the previous retain to ensure each context gets pulled out
+ * sometime later. */
+ for (i = 0;
+ i < BASE_MAX_NR_AS;
+ ++i, retained = retained >> 1) {
+ struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+ if (retained & 1u)
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+
+ /* Caller must wait for all Power Manager active references to be
+ * dropped */
+}
+
+void kbasep_js_resume(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int js;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
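+ /* Walk every slot's unpullable list; any context that has become
+  * pullable again is moved back onto the pullable queue so that the
+  * kbase_js_sched_all() call below can schedule it. */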
+ mutex_lock(&js_devdata->queue_mutex);
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ struct kbase_context *kctx, *n;
+
+ list_for_each_entry_safe(kctx, n,
+ &kbdev->js_data.ctx_list_unpullable[js],
+ jctx.sched_info.ctx.ctx_list_entry[js]) {
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
+ bool timer_sync = false;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+ kbase_js_ctx_pullable(kctx, js, false))
+ timer_sync =
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ }
+ }
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /* Restart atom processing */
+ kbase_js_sched_all(kbdev);
+
+ /* JS Resume complete */
+}
+
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ if ((katom->core_req & BASE_JD_REQ_FS) &&
+ (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
+ BASE_JD_REQ_T)))
+ return false;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
+ (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
+ (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
+ return false;
+
+ return true;
+}
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ if (katom->core_req & BASE_JD_REQ_FS)
+ return 0;
+
+ if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ if (katom->device_nr == 1 &&
+ kbdev->gpu_props.num_core_groups == 2)
+ return 2;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return 2;
+ }
+
+ return 1;
+}
+
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ bool enqueue_required;
+
+ katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ /* If slot will transition from unpullable to pullable then add to
+ * pullable list */
+ enqueue_required = jsctx_rb_none_to_pull(kctx, katom->slot_nr);
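+ /* Atoms blocked on a cross-slot dependency, or whose same-slot pre-dep
+  * is itself still on the x_dep list, are parked on the per-slot,
+  * per-priority x_dep list; everything else goes straight into the
+  * runnable tree. */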
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
+ (katom->pre_dep && (katom->pre_dep->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+ list_add_tail(&katom->queue, &queue->x_dep_head);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ enqueue_required = false;
+ } else {
+ /* Check if there are lower priority jobs to soft stop */
+ kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+ /* Add atom to ring buffer. */
+ jsctx_tree_add(kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ }
+
+ return enqueue_required;
+}
+
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ * runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->kbdev->hwaccess_lock);
+
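+ /* Follow the same-slot dependency chain: each atom that is no longer
+  * cross-slot blocked is moved from the x_dep list into the runnable
+  * tree, stopping at the first atom that is still blocked. */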
+ while (katom) {
+ WARN_ON(!(katom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+ if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ list_del(&katom->queue);
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ jsctx_tree_add(katom->kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ } else {
+ break;
+ }
+
+ katom = katom->post_dep;
+ }
+}
+
+
+/**
+ * kbase_js_evict_deps - Evict dependencies of a failed atom.
+ * @kctx: Context pointer
+ * @katom: Pointer to the atom that has failed.
+ * @js: The job slot the katom was run on.
+ * @prio: Priority of the katom.
+ *
+ * Remove all post dependencies of an atom from the context ringbuffers.
+ *
+ * The original atom's event_code will be propagated to all dependent atoms.
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_js_evict_deps(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js, int prio)
+{
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
+ struct kbase_jd_atom *next_katom = katom->post_dep;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (next_katom) {
+ KBASE_DEBUG_ASSERT(next_katom->status !=
+ KBASE_JD_ATOM_STATE_HW_COMPLETED);
+ next_katom->will_fail_event_code = katom->event_code;
+
+ }
+
+ /* Has a cross-slot dependency. */
+ if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ /* Remove dependency. */
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+
+ /* Fail if it had a data dependency. */
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) {
+ x_dep->will_fail_event_code = katom->event_code;
+ }
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
+ kbase_js_move_to_tree(x_dep);
+ }
+}
+
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
+{
+ struct kbase_jd_atom *katom;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_device *kbdev;
+ int pulled;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kbdev = kctx->kbdev;
+
+ js_devdata = &kbdev->js_data;
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ return NULL;
+ if (kbase_pm_is_suspending(kbdev))
+ return NULL;
+
+ katom = jsctx_rb_peek(kctx, js);
+ if (!katom)
+ return NULL;
+ if (kctx->blocked_js[js][katom->sched_priority])
+ return NULL;
+ if (atomic_read(&katom->blocked))
+ return NULL;
+
+ /* Due to ordering restrictions when unpulling atoms on failure, we do
+ * not allow multiple runs of fail-dep atoms from the same context to be
+ * present on the same slot */
+ if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) {
+ struct kbase_jd_atom *prev_atom =
+ kbase_backend_inspect_tail(kbdev, js);
+
+ if (prev_atom && prev_atom->kctx != kctx)
+ return NULL;
+ }
+
+ if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+ if (katom->x_pre_dep->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+ katom->x_pre_dep->will_fail_event_code)
+ return NULL;
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+ kbase_backend_nr_atoms_on_slot(kbdev, js))
+ return NULL;
+ }
+
+ kbase_ctx_flag_set(kctx, KCTX_PULLED);
+
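+ /* Pull accounting: the first atom pulled from a context with no
+  * pullable slots marks that context as runnable, and per-slot and
+  * per-priority counters track how many atoms are in flight. */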
+ pulled = atomic_inc_return(&kctx->atoms_pulled);
+ if (pulled == 1 && !kctx->slots_pullable) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+ }
+ atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
+ kctx->atoms_pulled_slot_pri[katom->slot_nr][katom->sched_priority]++;
+ jsctx_rb_pull(kctx, katom);
+
+ kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+ katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+
+ katom->ticks = 0;
+
+ return katom;
+}
+
+
+static void js_return_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+ struct kbasep_js_atom_retained_state retained_state;
+ int js = katom->slot_nr;
+ int prio = katom->sched_priority;
+ bool timer_sync = false;
+ bool context_idle = false;
+ unsigned long flags;
+ base_jd_core_req core_req = katom->core_req;
+ u64 affinity = katom->affinity;
+ enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
+
+ KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(katom);
+
+ kbase_backend_complete_wq(kbdev, katom);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+ kbasep_js_atom_retained_state_copy(&retained_state, katom);
+
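+ /* The atom is being handed back to the context (typically after a
+  * soft-stop), so reverse the accounting that kbase_js_pull() did and
+  * re-evaluate whether the context and slot are still pullable. */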
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ atomic_dec(&kctx->atoms_pulled);
+ atomic_dec(&kctx->atoms_pulled_slot[js]);
+
+ atomic_dec(&katom->blocked);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kctx->atoms_pulled_slot_pri[js][katom->sched_priority]--;
+
+ if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
+ jsctx_rb_none_to_pull(kctx, js))
+ timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
+
+ /* If this slot has been blocked due to soft-stopped atoms, and all
+ * atoms have now been processed, then unblock the slot */
+ if (!kctx->atoms_pulled_slot_pri[js][prio] &&
+ kctx->blocked_js[js][prio]) {
+ kctx->blocked_js[js][prio] = false;
+
+ /* Only mark the slot as pullable if the context is not idle -
+ * that case is handled below */
+ if (atomic_read(&kctx->atoms_pulled) &&
+ kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |= kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ }
+
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ if (!kctx->slots_pullable) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ timer_sync = true;
+ }
+
+ if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ int num_slots = kbdev->gpu_props.num_job_slots;
+ int slot;
+
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ for (slot = 0; slot < num_slots; slot++) {
+ if (kbase_js_ctx_pullable(kctx, slot, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, slot);
+ }
+ }
+
+ kbase_jm_idle_ctx(kbdev, kctx);
+
+ context_idle = true;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (context_idle) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ }
+
+ if (timer_sync)
+ kbase_js_sync_timers(kbdev);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ &retained_state);
+
+ kbase_js_sched_all(kbdev);
+
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
+ coreref_state);
+}
+
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ jsctx_rb_unpull(kctx, katom);
+
+ WARN_ON(work_pending(&katom->work));
+
+ /* Block re-submission until workqueue has run */
+ atomic_inc(&katom->blocked);
+
+ kbase_job_check_leave_disjoint(kctx->kbdev, katom);
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, js_return_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_device *kbdev;
+ unsigned long flags;
+ bool timer_sync = false;
+ int atom_slot;
+ bool context_idle = false;
+ int prio = katom->sched_priority;
+
+ kbdev = kctx->kbdev;
+ atom_slot = katom->slot_nr;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
+ context_idle = !atomic_dec_return(&kctx->atoms_pulled);
+ atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);
+ kctx->atoms_pulled_slot_pri[atom_slot][prio]--;
+
+ if (!atomic_read(&kctx->atoms_pulled) &&
+ !kctx->slots_pullable) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ timer_sync = true;
+ }
+
+ /* If this slot has been blocked due to soft-stopped atoms, and
+ * all atoms have now been processed, then unblock the slot */
+ if (!kctx->atoms_pulled_slot_pri[atom_slot][prio]
+ && kctx->blocked_js[atom_slot][prio]) {
+ kctx->blocked_js[atom_slot][prio] = false;
+ if (kbase_js_ctx_pullable(kctx, atom_slot, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, atom_slot);
+ }
+ }
+ WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
+
+ if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
+ jsctx_rb_none_to_pull(kctx, atom_slot)) {
+ if (!list_empty(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
+ timer_sync |= kbase_js_ctx_list_remove_nolock(
+ kctx->kbdev, kctx, atom_slot);
+ }
+
+ /*
+ * If submission is disabled on this context (most likely due to an
+	 * atom failure) and there are now no atoms left in the system, then
+	 * re-enable submission so that the context can be scheduled again.
+ */
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
+ !atomic_read(&kctx->atoms_pulled) &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ int js;
+
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ }
+ } else if (katom->x_post_dep &&
+ kbasep_js_is_submit_allowed(js_devdata, kctx)) {
+ int js;
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ }
+ }
+
+ /* Mark context as inactive. The pm reference will be dropped later in
+ * jd_done_worker().
+ */
+ if (context_idle)
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ return context_idle;
+}
+
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp)
+{
+ u64 microseconds_spent = 0;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
+
+ kbdev = kctx->kbdev;
+
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (katom->will_fail_event_code)
+ katom->event_code = katom->will_fail_event_code;
+
+ katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;
+
+ if (katom->event_code != BASE_JD_EVENT_DONE) {
+ kbase_js_evict_deps(kctx, katom, katom->slot_nr,
+ katom->sched_priority);
+ }
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP,
+ katom->slot_nr), NULL, 0);
+#endif
+
+ /* Calculate the job's time used */
+ if (end_timestamp != NULL) {
+		/* Only calculate this for jobs that actually ran on the HW
+		 * (e.g. jobs removed from NEXT never ran, so they genuinely
+		 * took zero time) */
+ ktime_t tick_diff = ktime_sub(*end_timestamp,
+ katom->start_timestamp);
+
+ microseconds_spent = ktime_to_ns(tick_diff);
+
+ do_div(microseconds_spent, 1000);
+
+ /* Round up time spent to the minimum timer resolution */
+ if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
+ microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
+ }
+
+
+ kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
+
+ /* Unblock cross dependency if present */
+ if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
+ !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
+ (x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false);
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ kbase_js_move_to_tree(x_dep);
+ if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false))
+ kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
+ x_dep->slot_nr);
+
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE)
+ return x_dep;
+ }
+
+ return NULL;
+}
+
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_context *last_active;
+ bool timer_sync = false;
+ bool ctx_waiting = false;
+
+ js_devdata = &kbdev->js_data;
+
+ down(&js_devdata->schedule_sem);
+ mutex_lock(&js_devdata->queue_mutex);
+
+ last_active = kbdev->hwaccess.active_kctx;
+
+ while (js_mask) {
+ int js;
+
+ js = ffs(js_mask) - 1;
+
+ while (1) {
+ struct kbase_context *kctx;
+ unsigned long flags;
+ bool context_idle = false;
+
+ kctx = kbase_js_ctx_list_pop_head(kbdev, js);
+
+ if (!kctx) {
+ js_mask &= ~(1 << js);
+ break; /* No contexts on pullable list */
+ }
+
+ if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) {
+ context_idle = true;
+
+ if (kbase_pm_context_active_handle_suspend(
+ kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+ /* Suspend pending - return context to
+ * queue and stop scheduling */
+ mutex_lock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ if (kbase_js_ctx_list_add_pullable_head(
+ kctx->kbdev, kctx, js))
+ kbase_js_sync_timers(kbdev);
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ up(&js_devdata->schedule_sem);
+ return;
+ }
+ kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+ }
+
+ if (!kbase_js_use_ctx(kbdev, kctx)) {
+ mutex_lock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ /* Context can not be used at this time */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (kbase_js_ctx_pullable(kctx, js, false)
+ || kbase_ctx_flag(kctx, KCTX_PRIVILEGED))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_head_nolock(
+ kctx->kbdev, kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ if (context_idle) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ }
+
+ /* No more jobs can be submitted on this slot */
+ js_mask &= ~(1 << js);
+ break;
+ }
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_ctx_flag_clear(kctx, KCTX_PULLED);
+
+ if (!kbase_jm_kick(kbdev, 1 << js))
+ /* No more jobs can be submitted on this slot */
+ js_mask &= ~(1 << js);
+
+ if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
+ bool pullable = kbase_js_ctx_pullable(kctx, js,
+ true);
+
+ /* Failed to pull jobs - push to head of list.
+ * Unless this context is already 'active', in
+ * which case it's effectively already scheduled
+ * so push it to the back of the list. */
+ if (pullable && kctx == last_active)
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kctx->kbdev,
+ kctx, js);
+ else if (pullable)
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_head_nolock(
+ kctx->kbdev,
+ kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev,
+ kctx, js);
+
+ /* If this context is not the active context,
+ * but the active context is pullable on this
+ * slot, then we need to remove the active
+ * marker to prevent it from submitting atoms in
+ * the IRQ handler, which would prevent this
+ * context from making progress. */
+ if (last_active && kctx != last_active &&
+ kbase_js_ctx_pullable(
+ last_active, js, true))
+ ctx_waiting = true;
+
+ if (context_idle) {
+ kbase_jm_idle_ctx(kbdev, kctx);
+ spin_unlock_irqrestore(
+ &kbdev->hwaccess_lock,
+ flags);
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ } else {
+ spin_unlock_irqrestore(
+ &kbdev->hwaccess_lock,
+ flags);
+ }
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+ js_mask &= ~(1 << js);
+ break; /* Could not run atoms on this slot */
+ }
+
+ /* Push to back of list */
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kctx->kbdev, kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev, kctx, js);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ }
+ }
+
+ if (timer_sync)
+ kbase_js_sync_timers(kbdev);
+
+ if (kbdev->hwaccess.active_kctx == last_active && ctx_waiting)
+ kbdev->hwaccess.active_kctx = NULL;
+
+ mutex_unlock(&js_devdata->queue_mutex);
+ up(&js_devdata->schedule_sem);
+}
+
+void kbase_js_zap_context(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+ int js;
+
+ /*
+ * Critical assumption: No more submission is possible outside of the
+ * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
+ * whilst the struct kbase_context is terminating.
+ */
+
+ /* First, atomically do the following:
+ * - mark the context as dying
+ * - try to evict it from the queue */
+ mutex_lock(&kctx->jctx.lock);
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ kbase_ctx_flag_set(kctx, KCTX_DYING);
+
+ dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+
+ /*
+ * At this point we know:
+ * - If eviction succeeded, it was in the queue, but now no
+ * longer is
+ * - We must cancel the jobs here. No Power Manager active reference to
+ * release.
+ * - This happens asynchronously - kbase_jd_zap_context() will wait for
+ * those jobs to be killed.
+ * - If eviction failed, then it wasn't in the queue. It is one
+ * of the following:
+ * - a. it didn't have any jobs, and so is not in the Queue or
+ * the Run Pool (not scheduled)
+ * - Hence, no more work required to cancel jobs. No Power Manager
+ * active reference to release.
+ * - b. it was in the middle of a scheduling transaction (and thus must
+ * have at least 1 job). This can happen from a syscall or a
+ * kernel thread. We still hold the jsctx_mutex, and so the thread
+ * must be waiting inside kbasep_js_try_schedule_head_ctx(),
+ * before checking whether the runpool is full. That thread will
+ * continue after we drop the mutex, and will notice the context
+ * is dying. It will rollback the transaction, killing all jobs at
+ * the same time. kbase_jd_zap_context() will wait for those jobs
+ * to be killed.
+ * - Hence, no more work required to cancel jobs, or to release the
+ * Power Manager active reference.
+ * - c. it is scheduled, and may or may not be running jobs
+ * - We must cause it to leave the runpool by stopping it from
+ * submitting any more jobs. When it finally does leave,
+ * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs
+ * (because it is dying), release the Power Manager active reference,
+ * and will not requeue the context in the queue.
+ * kbase_jd_zap_context() will wait for those jobs to be killed.
+ * - Hence, work required just to make it leave the runpool. Cancelling
+ * jobs and releasing the Power manager active reference will be
+ * handled when it leaves the runpool.
+ */
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (!list_empty(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+ }
+
+ /* The following events require us to kill off remaining jobs
+ * and update PM book-keeping:
+ * - we evicted it correctly (it must have jobs to be in the
+ * Queue)
+ *
+ * These events need no action, but take this path anyway:
+ * - Case a: it didn't have any jobs, and was never in the Queue
+ * - Case b: scheduling transaction will be partially rolled-
+ * back (this already cancels the jobs)
+ */
+
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);
+
+ /* Only cancel jobs when we evicted from the
+ * queue. No Power Manager active reference was held.
+ *
+ * Having is_dying set ensures that this kills, and
+ * doesn't requeue */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+ } else {
+ unsigned long flags;
+ bool was_retained;
+
+ /* Case c: didn't evict, but it is scheduled - it's in the Run
+ * Pool */
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+
+ /* Disable the ctx from submitting any more jobs */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		/* Retain and (later) release the context whilst it is now
+ * disallowed from submitting jobs - ensures that someone
+ * somewhere will be removing the context later on */
+ was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+ /* Since it's scheduled and we have the jsctx_mutex, it must be
+ * retained successfully */
+ KBASE_DEBUG_ASSERT(was_retained);
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+
+ /* Cancel any remaining running jobs for this kctx - if any.
+ * Submit is disallowed which takes effect immediately, so no
+ * more new jobs will appear after we do this. */
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ kbase_job_slot_hardstop(kctx, js, NULL);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
+ kctx);
+
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
+
+ /* After this, you must wait on both the
+ * kbase_jd_context::zero_jobs_wait and the
+ * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
+ * to be destroyed, and the context to be de-scheduled (if it was on the
+ * runpool).
+ *
+ * kbase_jd_zap_context() will do this. */
+}
+
+static inline int trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ return atomic_read(&kctx->refcount);
+}
+
+/**
+ * kbase_js_foreach_ctx_job(): - Call a function on all jobs in context
+ * @kctx: Pointer to context.
+ * @callback: Pointer to function to call for each job.
+ *
+ * Call a function on all jobs belonging to a non-queued, non-running
+ * context, and detach the jobs from the context as it goes.
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * Atoms will be removed from the queue, so this must only be called when
+ * cancelling jobs (which occurs as part of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+ kbasep_js_ctx_job_cb callback)
+{
+ struct kbase_device *kbdev;
+ unsigned long flags;
+ u32 js;
+
+ kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
+ 0u, trace_get_refcnt(kbdev, kctx));
+
+ /* Invoke callback on jobs on each slot in turn */
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ jsctx_queue_foreach(kctx, js, callback);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_js.h b/drivers/gpu/arm_gpu/mali_kbase_js.h
new file mode 100644
index 000000000000..ddada8e468a1
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_js.h
@@ -0,0 +1,925 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js.h
+ * Job Scheduler APIs.
+ */
+
+#ifndef _KBASE_JS_H_
+#define _KBASE_JS_H_
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_context.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_debug.h"
+
+#include "mali_kbase_js_ctx_attr.h"
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js Job Scheduler Internal APIs
+ * @{
+ *
+ * These APIs are Internal to KBase.
+ */
+
+/**
+ * @brief Initialize the Job Scheduler
+ *
+ * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * initialized before passing to the kbasep_js_devdata_init() function. This is
+ * to give efficient error path code.
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev);
+
+/**
+ * @brief Halt the Job Scheduler.
+ *
+ * It is safe to call this on \a kbdev even if its kbasep_js_device_data
+ * sub-structure was never initialized or failed initialization, to give
+ * efficient error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if its kbasep_js_device_data
+ * sub-structure was never initialized or failed initialization, to give
+ * efficient error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a struct kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The struct kbase_context must be zero initialized before passing to the
+ * kbasep_js_kctx_init() function. This is to give efficient error path code.
+ */
+int kbasep_js_kctx_init(struct kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a struct kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the struct kbase_context must be zero initialized before passing
+ * to the kbasep_js_kctx_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(struct kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the context is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. Consequently, the caller is
+ * guaranteed not to need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is false.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ *
+ * @return true indicates that the Policy Queue was updated, and so the
+ * caller will need to try scheduling a context onto the Run Pool.
+ * @return false indicates that no updates were made to the Policy Queue,
+ * so no further action is required from the caller. This is \b always returned
+ * when the context is currently scheduled.
+ */
+bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
+
+/**
+ * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
+ *
+ * Completely removing a job requires several calls:
+ * - kbasep_js_atom_retained_state_copy(), to capture the 'retained state' of
+ * the atom
+ * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
+ * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
+ * remaining state held as part of the job having been run.
+ *
+ * In the common case of atoms completing normally, this set of actions is
+ * more efficient for spinlock purposes than having kbasep_js_remove_job()
+ * handle all of the actions.
+ *
+ * In the case of cancelling atoms, it is easier to call
+ * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool
+ *
+ * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
+ * kbasep_js_remove_cancelled_job() instead.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ *
+ */
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
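+
+/*
+ * Illustrative sketch (not part of the driver): the normal-completion removal
+ * sequence described above, as a hypothetical caller on a completion path
+ * might arrange it. The kbdev/kctx/katom variables and the required locking
+ * are assumed to come from that caller.
+ *
+ *	struct kbasep_js_atom_retained_state retained_state;
+ *
+ *	kbasep_js_atom_retained_state_copy(&retained_state, katom);
+ *	kbasep_js_remove_job(kbdev, kctx, katom);
+ *	...
+ *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ *			&retained_state);
+ */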
+
+/**
+ * @brief Completely remove a job chain from the Job Scheduler, in the case
+ * where the job chain was cancelled.
+ *
+ * This is a variant of kbasep_js_remove_job() that takes care of removing all
+ * of the retained state too. This is generally useful for cancelled atoms,
+ * which need not be handled in an optimal way.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ * - it is not being killed with kbasep_jd_cancel()
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold the hwaccess_lock, (as this will be obtained
+ * internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
+ * obtained internally)
+ *
+ * @return true indicates that ctx attributes have changed and the caller
+ * should call kbase_js_sched_all() to try to run more jobs
+ * @return false otherwise
+ */
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
+ * used internally.
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - mmu_hw_mutex, hwaccess_lock
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Lookup a context in the Run Pool based upon its current address space
+ * and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * If the hwaccess_lock is already held, then the caller should use
+ * kbasep_js_runpool_lookup_ctx_nolock() instead.
+ *
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
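+
+/*
+ * Illustrative sketch (not part of the driver): the lookup/release pairing
+ * described above, for a hypothetical non-IRQ caller that has an address
+ * space number (as_nr) and must keep the context scheduled in while it
+ * inspects it.
+ *
+ *	struct kbase_context *kctx;
+ *
+ *	kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ *	if (kctx) {
+ *		... use kctx; it cannot be scheduled out here ...
+ *		kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *	}
+ */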
+
+/**
+ * @brief Handle the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or if it has
+ * run out of jobs).
+ *
+ * If the context does get scheduled out, then the following actions will be
+ * taken as part of descheduling the context:
+ * - For the context being descheduled:
+ *   - If the context is in the process of dying (all the jobs are being
+ * removed from it), then descheduling also kills off any jobs remaining in the
+ * context.
+ * - If the context is not dying, and any jobs remain after descheduling the
+ * context then it is re-enqueued to the Policy's Queue.
+ * - Otherwise, the context is still known to the scheduler, but remains absent
+ * from the Policy Queue until a job is next added to it.
+ * - In all descheduling cases, the Power Manager active reference (obtained
+ * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
+ *
+ * Whilst the context is being descheduled, this also handles actions that
+ * cause more atoms to be run:
+ * - Attempt submitting atoms when the Context Attributes on the Runpool have
+ * changed. This is because the context being scheduled out could mean that
+ * there are more opportunities to run atoms.
+ * - Attempt submitting to a slot that was previously blocked due to affinity
+ * restrictions. This is usually only necessary when releasing a context
+ * happens as part of completing a previous job, but is harmless nonetheless.
+ * - Attempt scheduling in a new context (if one is available), and if necessary,
+ * running a job from that new context.
+ *
+ * Unlike retaining a context in the runpool, this function \b cannot be called
+ * from IRQ context.
+ *
+ * It is a programming error to call this on a \a kctx that is not currently
+ * scheduled, or that already has a zero refcount.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
+ * actions from completing an atom.
+ *
+ * This is usually called as part of completing an atom and releasing the
+ * refcount on the context held by the atom.
+ *
+ * Therefore, the extra actions carried out are part of handling actions queued
+ * on a completed atom, namely:
+ * - Releasing the atom's context attributes
+ * - Retrying the submission on a particular slot, because we couldn't submit
+ * on that slot from an IRQ handler.
+ *
+ * The locking conditions of this function are the same as those for
+ * kbasep_js_runpool_release_ctx()
+ */
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * @brief Variant of kbase_js_runpool_release_ctx() that assumes that
+ * kbasep_js_device_data::runpool_mutex and
+ * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
+ * attempt to schedule new contexts.
+ */
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * @brief Schedule in a privileged context
+ *
+ * This schedules a context in regardless of the context priority.
+ * If the runpool is full, a context will be forced out of the runpool and the function will wait
+ * for the new context to be scheduled in.
+ * The context will be kept scheduled in (and the corresponding address space reserved) until
+ * kbasep_js_release_privileged_ctx is called).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Release a privileged context, allowing it to be scheduled out.
+ *
+ * See kbasep_js_runpool_release_ctx for potential side effects.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
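+
+/*
+ * Illustrative sketch (not part of the driver): the privileged
+ * schedule/release pairing, for a hypothetical caller that needs the
+ * context's address space to stay reserved while it does some work.
+ *
+ *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ *	... the context is scheduled in and its address space reserved ...
+ *	kbasep_js_release_privileged_ctx(kbdev, kctx);
+ */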
+
+/**
+ * @brief Try to submit the next job on each slot
+ *
+ * The following locks may be used:
+ * - kbasep_js_device_data::runpool_mutex
+ * - hwaccess_lock
+ */
+void kbase_js_try_run_jobs(struct kbase_device *kbdev);
+
+/**
+ * @brief Suspend the job scheduler during a Power Management Suspend event.
+ *
+ * Causes all contexts to be removed from the runpool, and prevents any
+ * contexts from (re)entering the runpool.
+ *
+ * This does not handle suspending the one privileged context: the caller must
+ * instead do this by suspending the GPU HW Counter Instrumentation.
+ *
+ * This will eventually cause all Power Management active references held by
+ * contexts on the runpool to be released, without running any more atoms.
+ *
+ * The caller must then wait for all Power Management active refcounts to become
+ * zero before completing the suspend.
+ *
+ * The emptying mechanism may take some time to complete, since it can wait for
+ * jobs to complete naturally instead of forcing them to end quickly. However,
+ * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
+ * function is guaranteed to complete in a finite time.
+ */
+void kbasep_js_suspend(struct kbase_device *kbdev);
+
+/**
+ * @brief Resume the Job Scheduler after a Power Management Resume event.
+ *
+ * This restores the actions from kbasep_js_suspend():
+ * - Schedules contexts back into the runpool
+ * - Resumes running atoms on the GPU
+ */
+void kbasep_js_resume(struct kbase_device *kbdev);
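+
+/*
+ * Illustrative sketch (not part of the driver): how a hypothetical Power
+ * Management suspend/resume path would pair the two calls above; waiting for
+ * the Power Management active references to drop to zero is shown only as a
+ * placeholder step.
+ *
+ *	kbasep_js_suspend(kbdev);
+ *	... wait for all Power Management active references to be released ...
+ *	... system suspended ...
+ *	kbasep_js_resume(kbdev);
+ */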
+
+/**
+ * @brief Submit an atom to the job scheduler.
+ *
+ * The atom is enqueued on the context's ringbuffer. The caller must have
+ * ensured that all dependencies can be represented in the ringbuffer.
+ *
+ * Caller must hold jctx->lock
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom Pointer to the atom to submit
+ *
+ * @return Whether the context needs to be enqueued. */
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
+ * @kctx: Context Pointer
+ * @prio: Priority (specifies the queue together with js).
+ * @js: Job slot (specifies the queue together with prio).
+ *
+ * Pushes all possible atoms from the linked list to the ringbuffer.
+ * The number of atoms is limited by the free space in the ringbuffer and by
+ * the number of atoms available in the linked list.
+ *
+ */
+void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);
+/**
+ * @brief Pull an atom from a context in the job scheduler for execution.
+ *
+ * The atom will not be removed from the ringbuffer at this stage.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx Context to pull from
+ * @param[in] js Job slot to pull from
+ * @return Pointer to an atom, or NULL if there are no atoms for this
+ * slot that can be currently run.
+ */
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);
+
+/**
+ * @brief Return an atom to the job scheduler ringbuffer.
+ *
+ * An atom is 'unpulled' if execution is stopped but intended to be returned to
+ * later. The most common reason for this is that the atom has been
+ * soft-stopped.
+ *
+ * Note that if multiple atoms are to be 'unpulled', they must be returned in
+ * the reverse order to which they were originally pulled. It is a programming
+ * error to return atoms in any other order.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom Pointer to the atom to unpull
+ */
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
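+
+/*
+ * Illustrative sketch (not part of the driver): the reverse-order rule for
+ * unpulling described above. The atoms a and b are hypothetical, and the
+ * hwaccess_lock is assumed to be held throughout.
+ *
+ *	a = kbase_js_pull(kctx, js);
+ *	b = kbase_js_pull(kctx, js);
+ *	...
+ *	kbase_js_unpull(kctx, b);
+ *	kbase_js_unpull(kctx, a);
+ */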
+
+/**
+ * @brief Complete an atom from jd_done_worker(), removing it from the job
+ * scheduler ringbuffer.
+ *
+ * If the atom failed then all dependee atoms marked for failure propagation
+ * will also fail.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] katom Pointer to the atom to complete
+ * @return true if the context is now idle (no jobs pulled)
+ * false otherwise
+ */
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * @brief Complete an atom.
+ *
+ * Most of the work required to complete an atom will be performed by
+ * jd_done_worker().
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] katom Pointer to the atom to complete
+ * @param[in] end_timestamp The time that the atom completed (may be NULL)
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp);
+
+/**
+ * @brief Submit atoms from all available contexts.
+ *
+ * This will attempt to submit as many jobs as possible to the provided job
+ * slots. It will exit when either all job slots are full, or all contexts have
+ * been used.
+ *
+ * @param[in] kbdev Device pointer
+ * @param[in] js_mask Mask of job slots to submit to
+ */
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
+
+/**
+ * kbase_jd_zap_context - Attempt to deschedule a context that is being
+ * destroyed
+ * @kctx: Context pointer
+ *
+ * This will attempt to remove a context from any internal job scheduler queues
+ * and perform any other actions to ensure that no further jobs can be
+ * submitted from the context.
+ *
+ * If the context is currently scheduled, then the caller must wait for all
+ * pending jobs to complete before taking any further action.
+ */
+void kbase_js_zap_context(struct kbase_context *kctx);
+
+/**
+ * @brief Validate an atom
+ *
+ * This will determine whether the atom can be scheduled onto the GPU. Atoms
+ * with invalid combinations of core requirements will be rejected.
+ *
+ * @param[in] kbdev Device pointer
+ * @param[in] katom Atom to validate
+ * @return true if atom is valid
+ * false otherwise
+ */
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_js_set_timeouts - update all JS timeouts with user specified data
+ * @kbdev: Device pointer
+ *
+ * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
+ * set to a positive number then that becomes the new value used, if a timeout
+ * is negative then the default is set.
+ */
+void kbase_js_set_timeouts(struct kbase_device *kbdev);
+
+/*
+ * Helpers follow
+ */
+
+/**
+ * @brief Check that a context is allowed to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size and to
+ * avoid repeating the same line of code in many places.
+ *
+ * As with any bool, never test the return value by comparing it against true.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 test_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ test_bit = (u16) (1u << kctx->as_nr);
+
+ return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
+}
+
+/**
+ * @brief Allow a context to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size and to
+ * avoid repeating the same line of code in many places.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 set_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ set_bit = (u16) (1u << kctx->as_nr);
+
+ dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed |= set_bit;
+}
+
+/**
+ * @brief Prevent a context from submitting more jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size and to
+ * avoid repeating the same line of code in many places.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 clear_bit;
+ u16 clear_mask;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ clear_bit = (u16) (1u << kctx->as_nr);
+ clear_mask = ~clear_bit;
+
+ dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed &= clear_mask;
+}
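+
+/*
+ * Illustrative sketch (not part of the driver): the check-then-set pattern
+ * used with the helpers above, for a hypothetical caller that holds
+ * hwaccess_lock on a scheduled context.
+ *
+ *	if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ *		kbasep_js_set_submit_allowed(js_devdata, kctx);
+ */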
+
+/**
+ * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
+ */
+static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom)
+{
+ atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Mark a slot as requiring resubmission by carrying that information on a
+ * completing atom.
+ *
+ * @note This can ASSERT in debug builds if the submit slot has been set to
+ * something other than the current value for @a js. This is because you might
+ * be unintentionally stopping more jobs from being submitted on the old submit
+ * slot, and that might cause a scheduling-hang.
+ *
+ * @note If you can guarantee that the atoms for the original slot will be
+ * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
+ * first to silence the ASSERT.
+ */
+static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js)
+{
+ KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS);
+ KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot ==
+ KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID)
+ || (atom->retry_submit_on_slot == js));
+
+ atom->retry_submit_on_slot = js;
+}
+
+/**
+ * Create an initial 'invalid' atom retained state, that requires no
+ * atom-related work to be done on releasing with
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state()
+ */
+static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
+{
+ retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
+ retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
+ retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Copy atom state that can be made available after jd_done_nolock() is called
+ * on that atom.
+ */
+static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
+{
+ retained_state->event_code = katom->event_code;
+ retained_state->core_req = katom->core_req;
+ retained_state->retry_submit_on_slot = katom->retry_submit_on_slot;
+ retained_state->sched_priority = katom->sched_priority;
+ retained_state->device_nr = katom->device_nr;
+}
+
+/**
+ * @brief Determine whether an atom has finished (given its retained state),
+ * and so should be given back to userspace/removed from the system.
+ *
+ * Reasons for an atom not finishing include:
+ * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
+ *
+ * @param[in] katom_retained_state the retained state of the atom to check
+ * @return false if the atom has not finished
+ * @return !=false if the atom has finished
+ */
+static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
+}
+
+/**
+ * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
+ *
+ * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
+ * code should just ignore it.
+ *
+ * @param[in] katom_retained_state the atom's retained state to check
+ * @return false if the retained state is invalid, and can be ignored
+ * @return !=false if the retained state is valid
+ */
+static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
+
+static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res)
+{
+ int js = katom_retained_state->retry_submit_on_slot;
+
+ *res = js;
+ return (bool) (js >= 0);
+}
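+
+/*
+ * Illustrative sketch (not part of the driver): how a hypothetical caller
+ * might combine the retained-state helpers above. The retained state starts
+ * out invalid, is later filled in from a completing atom, and is checked
+ * before being acted upon.
+ *
+ *	struct kbasep_js_atom_retained_state rs;
+ *
+ *	kbasep_js_atom_retained_state_init_invalid(&rs);
+ *	...
+ *	kbasep_js_atom_retained_state_copy(&rs, katom);
+ *	...
+ *	if (kbasep_js_atom_retained_state_is_valid(&rs) &&
+ *	    kbasep_js_has_atom_finished(&rs)) {
+ *		... the atom has finished and can be handed back ...
+ *	}
+ */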
+
+/**
+ * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
+ * context is guaranteed to be already previously retained.
+ *
+ * It is a programming error to supply the \a as_nr of a context that has not
+ * been previously retained/has a busy refcount of zero. The only exception is
+ * when there is no ctx in \a as_nr (NULL returned).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *
+ * @return a valid struct kbase_context on success, with a refcount that is guaranteed
+ * to be non-zero and unmodified by this function.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_context *found_kctx;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+ found_kctx = kbdev->as_to_kctx[as_nr];
+ KBASE_DEBUG_ASSERT(found_kctx == NULL ||
+ atomic_read(&found_kctx->refcount) > 0);
+
+ return found_kctx;
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_inc_context_count(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Track total contexts */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
+ ++(js_devdata->nr_all_contexts_running);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* Track contexts that can submit jobs */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
+ S8_MAX);
+ ++(js_devdata->nr_user_contexts_running);
+ }
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_dec_context_count(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Track total contexts */
+ --(js_devdata->nr_all_contexts_running);
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* Track contexts that can submit jobs */
+ --(js_devdata->nr_user_contexts_running);
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
+ }
+}
+
+
+/**
+ * @brief Submit atoms from all available contexts to all job slots.
+ *
+ * This will attempt to submit as many jobs as possible. It will exit when
+ * either all job slots are full, or all contexts have been used.
+ *
+ * @param[in] kbdev Device pointer
+ */
+static inline void kbase_js_sched_all(struct kbase_device *kbdev)
+{
+ kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+extern const int
+kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];
+
+extern const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+/**
+ * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
+ * to relative ordering
+ * @atom_prio: Priority ID to translate.
+ *
+ * Atom priority values for @ref base_jd_prio cannot be compared directly to
+ * find out which are higher or lower.
+ *
+ * This function will convert base_jd_prio values for successively lower
+ * priorities into a monotonically increasing sequence. That is, the lower the
+ * base_jd_prio priority, the higher the value produced by this function. This
+ * is in accordance with how the rest of the kernel treats priority.
+ *
+ * The mapping is 1:1 and the size of the valid input range is the same as the
+ * size of the valid output range, i.e.
+ * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
+ *
+ * Note: This must be kept in sync with BASE_JD_PRIO_<...> definitions
+ *
+ * Return: On success: a value in the inclusive range
+ * 0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
+ * KBASE_JS_ATOM_SCHED_PRIO_INVALID
+ */
+static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
+{
+ if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
+ return KBASE_JS_ATOM_SCHED_PRIO_INVALID;
+
+ return kbasep_js_atom_priority_to_relative[atom_prio];
+}
+
+static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
+{
+ unsigned int prio_idx;
+
+ KBASE_DEBUG_ASSERT(0 <= sched_prio
+ && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);
+
+ prio_idx = (unsigned int)sched_prio;
+
+ return kbasep_js_relative_priority_to_atom[prio_idx];
+}
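+
+/*
+ * Illustrative sketch (not part of the driver): because the mapping above is
+ * 1:1, converting a valid atom priority to a scheduler priority and back
+ * should be the identity, assuming the two translation tables are inverses of
+ * each other. atom_prio is a hypothetical base_jd_prio value below
+ * BASE_JD_NR_PRIO_LEVELS.
+ *
+ *	int sched_prio = kbasep_js_atom_prio_to_sched_prio(atom_prio);
+ *
+ *	if (sched_prio != KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+ *		WARN_ON(kbasep_js_sched_prio_to_atom_prio(sched_prio) !=
+ *			atom_prio);
+ */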
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.c
new file mode 100644
index 000000000000..321506ada835
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.c
@@ -0,0 +1,301 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+
+/*
+ * Private functions follow
+ */
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, retain that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
+ ++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
+ /* First refcount indicates a state change */
+ runpool_state_changed = true;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, release that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
+ --(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
+ /* Last de-refcount indicates a state change */
+ runpool_state_changed = true;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Retain a certain attribute on a ctx, also retaining it on the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);
+
+ ++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+		/* Only ref-count the attribute on the runpool for the first time this context sees this attribute */
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Release a certain attribute on a ctx, also releasing it from the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);
+
+ if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
+ }
+
+ /* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release_attr() needs to check it too */
+ --(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ return runpool_state_changed;
+}
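+/*
+ * Editor's note: the retain/release helpers above implement two-level
+ * refcounting: the context counts atoms holding an attribute, while the
+ * runpool counts whole contexts and only moves on the context's 0->1 and
+ * 1->0 transitions. A minimal standalone sketch of that pattern (not driver
+ * code; ctx_count and runpool_count are hypothetical stand-ins for the
+ * ctx_attr_ref_count arrays):
+ *
+ *   static int ctx_count, runpool_count;
+ *
+ *   static void retain(bool scheduled)
+ *   {
+ *           ++ctx_count;
+ *           if (scheduled && ctx_count == 1)
+ *                   ++runpool_count;
+ *   }
+ *
+ *   static void release(bool scheduled)
+ *   {
+ *           if (scheduled && ctx_count == 1)
+ *                   --runpool_count;
+ *           --ctx_count;
+ *   }
+ */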
+
+/*
+ * More commonly used public functions
+ */
+
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* This context never submits, so don't track any scheduling attributes */
+ return;
+ }
+
+ /* Transfer attributes held in the context flags for contexts that have submit enabled */
+
+ /* ... More attributes can be added here ... */
+
+ /* The context should not have been scheduled yet, so ASSERT if this caused
+ * runpool state changes (note that other threads *can't* affect the value
+ * of runpool_state_changed, due to how it's calculated) */
+ KBASE_DEBUG_ASSERT(runpool_state_changed == false);
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ bool runpool_state_changed;
+ int i;
+
+ /* Retain any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+ /* The context is being scheduled in, so update the runpool with the new attributes */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+
+ /* We don't need to know about state changed, because retaining a
+ * context occurs on scheduling it, and that itself will also try
+ * to run new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+ }
+ }
+}
+
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ bool runpool_state_changed = false;
+ int i;
+
+ /* Release any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+ /* The context is being scheduled out, so update the runpool on the removed attributes */
+ runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ bool runpool_state_changed = false;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom);
+ core_req = katom->core_req;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ /* We don't need to know about state changed, because retaining an
+ * atom occurs on adding it, and that itself will also try to run
+ * new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ bool runpool_state_changed = false;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom_retained_state);
+ core_req = katom_retained_state->core_req;
+
+ /* No-op for invalid atoms */
+ if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false)
+ return false;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ return runpool_state_changed;
+}
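+/*
+ * Editor's note: an illustrative calling pattern for the two functions above
+ * (the scheduler-kick step is paraphrased prose, not a real driver call):
+ *
+ *   on adding an atom to a context:
+ *       kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, katom);
+ *
+ *   after the atom is permanently removed (using its retained state):
+ *       if (kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, &retained))
+ *               ...a runpool attribute turned off: try to run the next job...
+ */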
diff --git a/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.h
new file mode 100644
index 000000000000..ce9183326a57
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_js_ctx_attr.h
@@ -0,0 +1,158 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_ctx_attr.h
+ * Job Scheduler Context Attribute APIs
+ */
+
+#ifndef _KBASE_JS_CTX_ATTR_H_
+#define _KBASE_JS_CTX_ATTR_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+
+/**
+ * Set the initial attributes of a context (when context create flags are set)
+ *
+ * Requires:
+ * - Hold the jsctx_mutex
+ */
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of a context
+ *
+ * This occurs on scheduling in the context on the runpool (but after
+ * is_scheduled is set)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ */
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Release all attributes of a context
+ *
+ * This occurs on scheduling out the context from the runpool (but before
+ * is_scheduled is cleared)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of an atom
+ *
+ * This occurs on adding an atom to a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ */
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * Release all attributes of an atom, given its retained state.
+ *
+ * This occurs after (permanently) removing an atom from a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * This is a no-op when \a katom_retained_state is invalid.
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+
+ return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+ /* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+ return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+ return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
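+/*
+ * Editor's note: an attribute is "on" exactly while its refcount is non-zero,
+ * so the three inline helpers above compose into simple queries. A hedged
+ * usage sketch (the policy decision shown is hypothetical, not driver logic):
+ *
+ *   if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE) &&
+ *       kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_NON_COMPUTE)) {
+ *           ...the runpool currently mixes compute and non-compute contexts...
+ *   }
+ */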
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_js_defs.h b/drivers/gpu/arm_gpu/mali_kbase_js_defs.h
new file mode 100644
index 000000000000..ba8b6441549b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_js_defs.h
@@ -0,0 +1,386 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+typedef u32 kbase_context_flags;
+
+struct kbasep_atom_req {
+ base_jd_core_req core_req;
+ kbase_context_flags ctx_req;
+ u32 device_nr;
+};
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running. Otherwise, it potentially allows an unlimited number of GPU NULL
+ * jobs to be submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean value of the context attributes often affect scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ */
+enum kbasep_js_ctx_attr {
+ /** Attribute indicating a context that contains Compute jobs. That is,
+ * the context has jobs of type @ref BASE_JD_REQ_ONLY_COMPUTE
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE,
+
+ /** Attribute indicating a context that contains Non-Compute jobs. That is,
+ * the context has some jobs that are \b not of type @ref
+ * BASE_JD_REQ_ONLY_COMPUTE.
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+ /** Attribute indicating that a context contains compute-job atoms that
+ * aren't restricted to a coherent group, and can run on all cores.
+ *
+ * Specifically, this is when the atom's \a core_req satisfy:
+ * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) // uses slot 1 or slot 2
+ * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups
+ *
+ * Such atoms could be blocked from running if one of the coherent groups
+ * is being used by another job slot, so tracking this context attribute
+ * allows us to prevent such situations.
+ *
+ * @note This doesn't take into account the 1-coregroup case, where all
+ * compute atoms would effectively be able to run on 'all cores', but
+ * contexts will still not always get marked with this attribute. Instead,
+ * it is the caller's responsibility to take into account the number of
+ * coregroups when interpreting this attribute.
+ *
+ * @note Whilst Tiler atoms are normally combined with
+ * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+ * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+ * enough to handle anyway.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+ /** Must be the last in the enum */
+ KBASEP_JS_CTX_ATTR_COUNT
+};
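+/*
+ * Editor's note: the COMPUTE_ALL_CORES condition described above, written out
+ * as a boolean expression over an atom's core_req (a sketch mirroring the
+ * wording of the comment and the retain/release code, not a new driver
+ * helper):
+ *
+ *   bool compute_all_cores =
+ *           (core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) &&
+ *           !(core_req & (BASE_JD_REQ_COHERENT_GROUP |
+ *                         BASE_JD_REQ_SPECIFIC_COHERENT_GROUP));
+ */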
+
+enum {
+ /** Bit indicating that new atom should be started because this atom completed */
+ KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+ /** Bit indicating that the atom was evicted from the JS_NEXT registers */
+ KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device. This context is global to the device, and is not tied to any
+ * particular struct kbase_context running on the device.
+ *
+ * nr_contexts_running and as_free are optimized for packing together (by making
+ * them smaller types than u32). The operations on them should rarely involve
+ * masking. The use of signed types for arithmetic indicates to the compiler that
+ * the value will not rollover (which would be undefined behavior), and so under
+ * the Total License model, it is free to make optimizations based on that (i.e.
+ * to remove masking).
+ */
+struct kbasep_js_device_data {
+ /* Sub-structure to collect together Job Scheduling data used in IRQ
+ * context. The hwaccess_lock must be held when accessing. */
+ struct runpool_irq {
+ /** Bitvector indicating whether a currently scheduled context is allowed to submit jobs.
+ * When bit 'N' is set in this, it indicates whether the context bound to address space
+ * 'N' is allowed to submit jobs.
+ */
+ u16 submit_allowed;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of contexts
+ * that can fit into the runpool. This is currently BASE_MAX_NR_AS
+ *
+ * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store
+ * the refcount. Hence, it's not worthwhile reducing this to
+ * bit-manipulation on u32s to save space (where in contrast, 4 bit
+ * sub-fields would be easy to do and would save space).
+ *
+ * Whilst this must not become negative, the sign bit is used for:
+ * - error detection in debug builds
+ * - Optimization: it is undefined for a signed int to overflow, and so
+ * the compiler can optimize for that never happening (thus, no masking
+ * is required on updating the variable) */
+ s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ /*
+ * Affinity management and tracking
+ */
+ /** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates
+ * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */
+ u64 slot_affinities[BASE_JM_MAX_NR_SLOTS];
+ /** Refcount for each core owned by each slot. Used to generate the
+ * slot_affinities array of bitvectors
+ *
+ * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS,
+ * because it is refcounted only when a job is definitely about to be
+ * submitted to a slot, and is de-refcounted immediately after a job
+ * finishes */
+ s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64];
+ } runpool_irq;
+
+ /**
+ * Run Pool mutex, for managing contexts within the runpool.
+ * Unless otherwise specified, you must hold this lock whilst accessing any
+ * members that follow
+ *
+ * In addition, this is used to access:
+ * - the kbasep_js_kctx_info::runpool substructure
+ */
+ struct mutex runpool_mutex;
+
+ /**
+ * Queue Lock, used to access the Policy's queue of contexts independently
+ * of the Run Pool.
+ *
+ * Of course, you don't need the Run Pool lock to access this.
+ */
+ struct mutex queue_mutex;
+
+ /**
+ * Scheduling semaphore. This must be held when calling
+ * kbase_jm_kick()
+ */
+ struct semaphore schedule_sem;
+
+ /**
+ * List of contexts that can currently be pulled from
+ */
+ struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS];
+ /**
+ * List of contexts that can not currently be pulled from, but have
+ * jobs currently running.
+ */
+ struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS];
+
+ /** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+ s8 nr_user_contexts_running;
+ /** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+ s8 nr_all_contexts_running;
+
+ /** Core Requirements to match up with base_js_atom's core_req member
+ * @note This is a write-once member, and so no locking is required to read */
+ base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+ u32 scheduling_period_ns; /**< Value for JS_SCHEDULING_PERIOD_NS */
+ u32 soft_stop_ticks; /**< Value for JS_SOFT_STOP_TICKS */
+ u32 soft_stop_ticks_cl; /**< Value for JS_SOFT_STOP_TICKS_CL */
+ u32 hard_stop_ticks_ss; /**< Value for JS_HARD_STOP_TICKS_SS */
+ u32 hard_stop_ticks_cl; /**< Value for JS_HARD_STOP_TICKS_CL */
+ u32 hard_stop_ticks_dumping; /**< Value for JS_HARD_STOP_TICKS_DUMPING */
+ u32 gpu_reset_ticks_ss; /**< Value for JS_RESET_TICKS_SS */
+ u32 gpu_reset_ticks_cl; /**< Value for JS_RESET_TICKS_CL */
+ u32 gpu_reset_ticks_dumping; /**< Value for JS_RESET_TICKS_DUMPING */
+ u32 ctx_timeslice_ns; /**< Value for JS_CTX_TIMESLICE_NS */
+
+ /**< Value for JS_SOFT_JOB_TIMEOUT */
+ atomic_t soft_job_timeout_ms;
+
+ /** List of suspended soft jobs */
+ struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* Support soft-stop on a single context */
+ bool softstop_always;
+#endif /* CONFIG_MALI_DEBUG */
+
+ /** The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths).
+ * @note This is a write-once member, and so no locking is required to read */
+ int init_status;
+
+ /* Number of contexts that can currently be pulled from */
+ u32 nr_contexts_pullable;
+
+ /* Number of contexts that can either be pulled from or are currently
+ * running */
+ atomic_t nr_contexts_runnable;
+};
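+/*
+ * Editor's note: submit_allowed above is a per-address-space bitvector (bit N
+ * corresponds to address space N). A minimal sketch of the usual bit
+ * operations on such a field (the helper names are hypothetical, not driver
+ * API):
+ *
+ *   static bool as_submit_allowed(u16 submit_allowed, int as_nr)
+ *   {
+ *           return (submit_allowed >> as_nr) & 1u;
+ *   }
+ *
+ *   static u16 as_allow_submit(u16 submit_allowed, int as_nr)
+ *   {
+ *           return submit_allowed | (u16)(1u << as_nr);
+ *   }
+ */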
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the struct kbase_context that encapsulates all the
+ * scheduling information.
+ */
+struct kbasep_js_kctx_info {
+
+ /**
+ * Job Scheduler Context information sub-structure. These members are
+ * accessed regardless of whether the context is:
+ * - In the Policy's Run Pool
+ * - In the Policy's Queue
+ * - Not queued nor in the Run Pool.
+ *
+ * You must obtain the jsctx_mutex before accessing any other members of
+ * this substructure.
+ *
+ * You may not access any of these members from IRQ context.
+ */
+ struct kbase_jsctx {
+ struct mutex jsctx_mutex; /**< Job Scheduler Context lock */
+
+ /** Number of jobs <b>ready to run</b> - does \em not include the jobs waiting in
+ * the dispatcher, nor dependency-only jobs. See kbase_jd_context::job_nr
+ * for such jobs */
+ u32 nr_jobs;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of atoms on
+ * the context. */
+ u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ /**
+ * Wait queue to wait for KCTX_SCHEDULED flag state changes.
+ */
+ wait_queue_head_t is_scheduled_wait;
+
+ /** Link implementing JS queues. Context can be present on one
+ * list per job slot
+ */
+ struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS];
+ } ctx;
+
+ /* The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths) */
+ int init_status;
+};
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
+ * because the original atom could disappear. */
+struct kbasep_js_atom_retained_state {
+ /** Event code - to determine whether the atom has finished */
+ enum base_jd_event_code event_code;
+ /** core requirements */
+ base_jd_core_req core_req;
+ /* priority */
+ int sched_priority;
+ /** Job Slot to retry submitting to if submission from IRQ handler failed */
+ int retry_submit_on_slot;
+ /* Core group atom was executed on */
+ u32 device_nr;
+
+};
+
+/**
+ * Value signifying 'no retry on a slot required' for:
+ * - kbase_js_atom_retained_state::retry_submit_on_slot
+ * - kbase_jd_atom::retry_submit_on_slot
+ */
+#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1)
+
+/**
+ * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state.
+ *
+ * @see kbase_atom_retained_state_is_valid()
+ */
+#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP
+
+/**
+ * @brief The JS timer resolution, in microseconds
+ *
+ * Any non-zero difference in time will be at least this size.
+ */
+#define KBASEP_JS_TICK_RESOLUTION_US 1
+
+/*
+ * Internal atom priority defines for kbase_jd_atom::sched_prio
+ */
+enum {
+ KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
+ KBASE_JS_ATOM_SCHED_PRIO_MED,
+ KBASE_JS_ATOM_SCHED_PRIO_LOW,
+ KBASE_JS_ATOM_SCHED_PRIO_COUNT,
+};
+
+/* Invalid priority for kbase_jd_atom::sched_prio */
+#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
+
+/* Default priority in the case of contexts with no atoms, or being lenient
+ * about invalid priorities from userspace */
+#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_DEFS_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_linux.h b/drivers/gpu/arm_gpu/mali_kbase_linux.h
new file mode 100644
index 000000000000..6d1e61fd41e0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_linux.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_linux.h
+ * Base kernel APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_LINUX_H_
+#define _KBASE_LINUX_H_
+
+/* All things that are needed for the Linux port. */
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API))
+ #define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func)
+#else
+ #define KBASE_EXPORT_TEST_API(func)
+#endif
+
+#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func)
+
+#endif /* _KBASE_LINUX_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem.c b/drivers/gpu/arm_gpu/mali_kbase_mem.c
new file mode 100644
index 000000000000..6fefffe7f493
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem.c
@@ -0,0 +1,2869 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+#ifdef CONFIG_UMP
+#include <linux/ump.h>
+#endif /* CONFIG_UMP */
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_cache_policy.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_tlstream.h>
+
+/* This function finds out which RB tree the given GPU VA region belongs to
+ * based on the region zone */
+static struct rb_root *kbase_reg_flags_to_rbtree(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ struct rb_root *rbtree = NULL;
+
+ switch (reg->flags & KBASE_REG_ZONE_MASK) {
+ case KBASE_REG_ZONE_CUSTOM_VA:
+ rbtree = &kctx->reg_rbtree_custom;
+ break;
+ case KBASE_REG_ZONE_EXEC:
+ rbtree = &kctx->reg_rbtree_exec;
+ break;
+ case KBASE_REG_ZONE_SAME_VA:
+ rbtree = &kctx->reg_rbtree_same;
+ /* fall through */
+ default:
+ rbtree = &kctx->reg_rbtree_same;
+ break;
+ }
+
+ return rbtree;
+}
+
+/* This function finds out which RB tree the given pfn from the GPU VA belongs
+ * to based on the memory zone the pfn refers to */
+static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
+ u64 gpu_pfn)
+{
+ struct rb_root *rbtree = NULL;
+
+#ifdef CONFIG_64BIT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+#endif /* CONFIG_64BIT */
+ if (gpu_pfn >= KBASE_REG_ZONE_CUSTOM_VA_BASE)
+ rbtree = &kctx->reg_rbtree_custom;
+ else if (gpu_pfn >= KBASE_REG_ZONE_EXEC_BASE)
+ rbtree = &kctx->reg_rbtree_exec;
+ else
+ rbtree = &kctx->reg_rbtree_same;
+#ifdef CONFIG_64BIT
+ } else {
+ if (gpu_pfn >= kctx->same_va_end)
+ rbtree = &kctx->reg_rbtree_custom;
+ else
+ rbtree = &kctx->reg_rbtree_same;
+ }
+#endif /* CONFIG_64BIT */
+
+ return rbtree;
+}
+
+/* This function inserts a region into the tree. */
+static void kbase_region_tracker_insert(struct kbase_context *kctx,
+ struct kbase_va_region *new_reg)
+{
+ u64 start_pfn = new_reg->start_pfn;
+ struct rb_node **link = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_root *rbtree = NULL;
+
+ rbtree = kbase_reg_flags_to_rbtree(kctx, new_reg);
+
+ link = &(rbtree->rb_node);
+ /* Find the right place in the tree using tree search */
+ while (*link) {
+ struct kbase_va_region *old_reg;
+
+ parent = *link;
+ old_reg = rb_entry(parent, struct kbase_va_region, rblink);
+
+ /* RBTree requires no duplicate entries. */
+ KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
+
+ if (old_reg->start_pfn > start_pfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /* Put the new node there, and rebalance tree */
+ rb_link_node(&(new_reg->rblink), parent, link);
+
+ rb_insert_color(&(new_reg->rblink), rbtree);
+}
+
+/* Find allocated region enclosing free range. */
+static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(
+ struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)
+{
+ struct rb_node *rbnode = NULL;
+ struct kbase_va_region *reg = NULL;
+ struct rb_root *rbtree = NULL;
+
+ u64 end_pfn = start_pfn + nr_pages;
+
+ rbtree = kbase_gpu_va_to_rbtree(kctx, start_pfn);
+
+ rbnode = rbtree->rb_node;
+
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (start_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (end_pfn > tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+/* Find region enclosing given address. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_root *rbtree = NULL;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+ rbnode = rbtree->rb_node;
+
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (gpu_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (gpu_pfn >= tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
+
+/* Find region with given base address */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr)
+{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_node *rbnode = NULL;
+ struct kbase_va_region *reg = NULL;
+ struct rb_root *rbtree = NULL;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+ rbnode = rbtree->rb_node;
+
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if (reg->start_pfn > gpu_pfn)
+ rbnode = rbnode->rb_left;
+ else if (reg->start_pfn < gpu_pfn)
+ rbnode = rbnode->rb_right;
+ else
+ return reg;
+
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
+
+/* Find region meeting given requirements */
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
+{
+ struct rb_node *rbnode = NULL;
+ struct kbase_va_region *reg = NULL;
+ struct rb_root *rbtree = NULL;
+
+ /* Note that this is a linear search, as we do not have a target
+ address in mind, so it cannot use the rbtree's binary search */
+
+ rbtree = kbase_reg_flags_to_rbtree(kctx, reg_reqs);
+
+ rbnode = rb_first(rbtree);
+
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if ((reg->nr_pages >= nr_pages) &&
+ (reg->flags & KBASE_REG_FREE)) {
+ /* Check alignment */
+ u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);
+
+ if ((start_pfn >= reg->start_pfn) &&
+ (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
+ ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
+ return reg;
+ }
+ rbnode = rb_next(rbnode);
+ }
+
+ return NULL;
+}
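+/*
+ * Editor's note: a worked example of the align-up step above, assuming
+ * align = 16 (a power of two) and reg->start_pfn = 0x1005:
+ *
+ *   start_pfn = (0x1005 + 16 - 1) & ~(16 - 1) = 0x1014 & ~0xf = 0x1010
+ *
+ * The range checks that follow then confirm the aligned window of nr_pages
+ * still fits inside the candidate free region.
+ */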
+
+/**
+ * @brief Remove a region object from the global list.
+ *
+ * The region reg is removed, possibly by merging with other free and
+ * compatible adjacent regions. It must be called with the context
+ * region lock held. The associated memory is not released (see
+ * kbase_free_alloced_region). Internal use only.
+ */
+static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct rb_node *rbprev;
+ struct kbase_va_region *prev = NULL;
+ struct rb_node *rbnext;
+ struct kbase_va_region *next = NULL;
+ struct rb_root *reg_rbtree = NULL;
+
+ int merged_front = 0;
+ int merged_back = 0;
+ int err = 0;
+
+ reg_rbtree = kbase_reg_flags_to_rbtree(kctx, reg);
+
+ /* Try to merge with the previous block first */
+ rbprev = rb_prev(&(reg->rblink));
+ if (rbprev) {
+ prev = rb_entry(rbprev, struct kbase_va_region, rblink);
+ if (prev->flags & KBASE_REG_FREE) {
+ /* We're compatible with the previous VMA,
+ * merge with it */
+ WARN_ON((prev->flags & KBASE_REG_ZONE_MASK) !=
+ (reg->flags & KBASE_REG_ZONE_MASK));
+ prev->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), reg_rbtree);
+ reg = prev;
+ merged_front = 1;
+ }
+ }
+
+ /* Try to merge with the next block second */
+ /* Note we do the lookup here as the tree may have been rebalanced. */
+ rbnext = rb_next(&(reg->rblink));
+ if (rbnext) {
+ /* We're compatible with the next VMA, merge with it */
+ next = rb_entry(rbnext, struct kbase_va_region, rblink);
+ if (next->flags & KBASE_REG_FREE) {
+ WARN_ON((next->flags & KBASE_REG_ZONE_MASK) !=
+ (reg->flags & KBASE_REG_ZONE_MASK));
+ next->start_pfn = reg->start_pfn;
+ next->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), reg_rbtree);
+ merged_back = 1;
+ if (merged_front) {
+ /* We already merged with prev, free it */
+ kbase_free_alloced_region(reg);
+ }
+ }
+ }
+
+ /* If we failed to merge then we need to add a new block */
+ if (!(merged_front || merged_back)) {
+ /*
+ * We didn't merge anything. Add a new free
+ * placeholder and remove the original one.
+ */
+ struct kbase_va_region *free_reg;
+
+ free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK);
+ if (!free_reg) {
+ err = -ENOMEM;
+ goto out;
+ }
+ rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree);
+ }
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_remove_va_region);
+
+/**
+ * @brief Insert a VA region to the list, replacing the current at_reg.
+ */
+static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+{
+ struct rb_root *reg_rbtree = NULL;
+ int err = 0;
+
+ reg_rbtree = kbase_reg_flags_to_rbtree(kctx, at_reg);
+
+ /* Must be a free region */
+ KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
+ /* start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
+ /* at least nr_pages from start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+
+ /* New region exactly covers the old one, so swap it in and delete the old one. */
+ if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
+ rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
+ reg_rbtree);
+ kbase_free_alloced_region(at_reg);
+ }
+ /* New region replaces the start of the old one, so insert before. */
+ else if (at_reg->start_pfn == start_pfn) {
+ at_reg->start_pfn += nr_pages;
+ KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region replaces the end of the old one, so insert after. */
+ else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region splits the old one, so insert and create new */
+ else {
+ struct kbase_va_region *new_front_reg;
+
+ new_front_reg = kbase_alloc_free_region(kctx,
+ at_reg->start_pfn,
+ start_pfn - at_reg->start_pfn,
+ at_reg->flags & KBASE_REG_ZONE_MASK);
+
+ if (new_front_reg) {
+ at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
+ at_reg->start_pfn = start_pfn + nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_front_reg);
+ kbase_region_tracker_insert(kctx, new_reg);
+ } else {
+ err = -ENOMEM;
+ }
+ }
+
+ return err;
+}
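+/*
+ * Editor's summary of the four cases above, using an illustrative free region
+ * spanning pfns [100, 200):
+ *
+ *   insert [100, 200)  exact match   -> new region replaces the free region
+ *   insert [100, 150)  at the start  -> free region shrinks to [150, 200)
+ *   insert [150, 200)  at the end    -> free region shrinks to [100, 150)
+ *   insert [120, 150)  in the middle -> free placeholders [100, 120) and
+ *                                       [150, 200) surround the new region
+ */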
+
+/**
+ * @brief Add a VA region to the list.
+ */
+int kbase_add_va_region(struct kbase_context *kctx,
+ struct kbase_va_region *reg, u64 addr,
+ size_t nr_pages, size_t align)
+{
+ struct kbase_va_region *tmp;
+ u64 gpu_pfn = addr >> PAGE_SHIFT;
+ int err = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ if (!align)
+ align = 1;
+
+ /* must be a power of 2 */
+ KBASE_DEBUG_ASSERT((align & (align - 1)) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+
+ /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
+ if (gpu_pfn) {
+ struct device *dev = kctx->kbdev->dev;
+
+ KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
+
+ tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
+ if (!tmp) {
+ dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (!(tmp->flags & KBASE_REG_FREE)) {
+ dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
+ dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
+ dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages);
+ if (err) {
+ dev_warn(dev, "Failed to insert va region");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ goto exit;
+ }
+
+ /* Path 2: Map any free address which meets the requirements. */
+ {
+ u64 start_pfn;
+
+ /*
+ * Depending on the zone the allocation request is for
+ * we might need to retry it.
+ */
+ do {
+ tmp = kbase_region_tracker_find_region_meeting_reqs(
+ kctx, reg, nr_pages, align);
+ if (tmp) {
+ start_pfn = (tmp->start_pfn + align - 1) &
+ ~(align - 1);
+ err = kbase_insert_va_region_nolock(kctx, reg,
+ tmp, start_pfn, nr_pages);
+ break;
+ }
+
+ /*
+ * If the allocation is not from the same zone as JIT
+ * then don't retry, we're out of VA and there is
+ * nothing which can be done about it.
+ */
+ if ((reg->flags & KBASE_REG_ZONE_MASK) !=
+ KBASE_REG_ZONE_CUSTOM_VA)
+ break;
+ } while (kbase_jit_evict(kctx));
+
+ if (!tmp)
+ err = -ENOMEM;
+ }
+
+ exit:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region);
+
+/**
+ * @brief Initialize the internal region tracker data structure.
+ */
+static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
+ struct kbase_va_region *same_va_reg,
+ struct kbase_va_region *exec_reg,
+ struct kbase_va_region *custom_va_reg)
+{
+ kctx->reg_rbtree_same = RB_ROOT;
+ kbase_region_tracker_insert(kctx, same_va_reg);
+
+ /* Although exec and custom_va_reg don't always exist,
+ * initialize unconditionally because of the mem_view debugfs
+ * implementation which relies on these being empty */
+ kctx->reg_rbtree_exec = RB_ROOT;
+ kctx->reg_rbtree_custom = RB_ROOT;
+
+ if (exec_reg)
+ kbase_region_tracker_insert(kctx, exec_reg);
+ if (custom_va_reg)
+ kbase_region_tracker_insert(kctx, custom_va_reg);
+}
+
+static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ do {
+ rbnode = rb_first(rbtree);
+ if (rbnode) {
+ rb_erase(rbnode, rbtree);
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ kbase_free_alloced_region(reg);
+ }
+ } while (rbnode);
+}
+
+void kbase_region_tracker_term(struct kbase_context *kctx)
+{
+ kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same);
+ kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_exec);
+ kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom);
+}
+
+/**
+ * Initialize the region tracker data structure.
+ */
+int kbase_region_tracker_init(struct kbase_context *kctx)
+{
+ struct kbase_va_region *same_va_reg;
+ struct kbase_va_region *exec_reg = NULL;
+ struct kbase_va_region *custom_va_reg = NULL;
+ size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE;
+ u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
+ u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
+ u64 same_va_pages;
+ int err;
+
+ /* Take the lock as kbase_free_alloced_region requires it */
+ kbase_gpu_vm_lock(kctx);
+
+#if defined(CONFIG_ARM64)
+ same_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+ same_va_bits = 47;
+#elif defined(CONFIG_64BIT)
+#error Unsupported 64-bit architecture
+#endif
+
+#ifdef CONFIG_64BIT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ same_va_bits = 32;
+ else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
+ same_va_bits = 33;
+#endif
+
+ if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) {
+ err = -EINVAL;
+ goto fail_unlock;
+ }
+
+ same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+ /* all have SAME_VA */
+ same_va_reg = kbase_alloc_free_region(kctx, 1,
+ same_va_pages,
+ KBASE_REG_ZONE_SAME_VA);
+
+ if (!same_va_reg) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+#ifdef CONFIG_64BIT
+ /* 32-bit clients have exec and custom VA zones */
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+#endif
+ if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
+ err = -EINVAL;
+ goto fail_free_same_va;
+ }
+ /* If the current size of TMEM is out of range of the
+ * virtual address space addressable by the MMU then
+ * we should shrink it to fit
+ */
+ if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
+ custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
+
+ exec_reg = kbase_alloc_free_region(kctx,
+ KBASE_REG_ZONE_EXEC_BASE,
+ KBASE_REG_ZONE_EXEC_SIZE,
+ KBASE_REG_ZONE_EXEC);
+
+ if (!exec_reg) {
+ err = -ENOMEM;
+ goto fail_free_same_va;
+ }
+
+ custom_va_reg = kbase_alloc_free_region(kctx,
+ KBASE_REG_ZONE_CUSTOM_VA_BASE,
+ custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+
+ if (!custom_va_reg) {
+ err = -ENOMEM;
+ goto fail_free_exec;
+ }
+#ifdef CONFIG_64BIT
+ }
+#endif
+
+ kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg);
+
+ kctx->same_va_end = same_va_pages + 1;
+
+ kbase_gpu_vm_unlock(kctx);
+ return 0;
+
+fail_free_exec:
+ kbase_free_alloced_region(exec_reg);
+fail_free_same_va:
+ kbase_free_alloced_region(same_va_reg);
+fail_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+}
+
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
+{
+#ifdef CONFIG_64BIT
+ struct kbase_va_region *same_va;
+ struct kbase_va_region *custom_va_reg;
+ u64 same_va_bits;
+ u64 total_va_size;
+ int err;
+
+ /*
+ * Nothing to do for 32-bit clients, JIT uses the existing
+ * custom VA zone.
+ */
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return 0;
+
+#if defined(CONFIG_ARM64)
+ same_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+ same_va_bits = 47;
+#elif defined(CONFIG_64BIT)
+#error Unsupported 64-bit architecture
+#endif
+
+ if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
+ same_va_bits = 33;
+
+ total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /*
+ * Modify the same VA free region after creation. Be careful to ensure
+ * that allocations haven't been made as they could cause an overlap
+ * to happen with existing same VA allocations and the custom VA zone.
+ */
+ same_va = kbase_region_tracker_find_region_base_address(kctx,
+ PAGE_SIZE);
+ if (!same_va) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ /* The region flag or region size has changed since creation so bail. */
+ if ((!(same_va->flags & KBASE_REG_FREE)) ||
+ (same_va->nr_pages != total_va_size)) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ if (same_va->nr_pages < jit_va_pages ||
+ kctx->same_va_end < jit_va_pages) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ /* It's safe to adjust the same VA zone now */
+ same_va->nr_pages -= jit_va_pages;
+ kctx->same_va_end -= jit_va_pages;
+
+ /*
+ * Create a custom VA zone at the end of the VA for allocations which
+ * JIT can use so it doesn't have to allocate VA from the kernel.
+ */
+ custom_va_reg = kbase_alloc_free_region(kctx,
+ kctx->same_va_end,
+ jit_va_pages,
+ KBASE_REG_ZONE_CUSTOM_VA);
+
+ if (!custom_va_reg) {
+ /*
+ * The context will be destroyed if we fail here so no point
+ * reverting the change we made to same_va.
+ */
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ kbase_region_tracker_insert(kctx, custom_va_reg);
+
+ kbase_gpu_vm_unlock(kctx);
+ return 0;
+
+fail_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+#else
+ return 0;
+#endif
+}
+
+int kbase_mem_init(struct kbase_device *kbdev)
+{
+ struct kbasep_mem_device *memdev;
+ int ret;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+ kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX;
+
+ /* Initialize memory usage */
+ atomic_set(&memdev->used_pages, 0);
+
+ ret = kbase_mem_pool_init(&kbdev->mem_pool,
+ KBASE_MEM_POOL_MAX_SIZE_KBDEV,
+ KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+ kbdev,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = kbase_mem_pool_init(&kbdev->lp_mem_pool,
+ (KBASE_MEM_POOL_MAX_SIZE_KBDEV >> 9),
+ KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER,
+ kbdev,
+ NULL);
+ if (ret)
+ kbase_mem_pool_term(&kbdev->mem_pool);
+
+ return ret;
+}
+
+void kbase_mem_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_mem_term(struct kbase_device *kbdev)
+{
+ struct kbasep_mem_device *memdev;
+ int pages;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+
+ pages = atomic_read(&memdev->used_pages);
+ if (pages != 0)
+ dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+ kbase_mem_pool_term(&kbdev->mem_pool);
+ kbase_mem_pool_term(&kbdev->lp_mem_pool);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_term);
+
+
+
+
+/**
+ * @brief Allocate a free region object.
+ *
+ * The allocated object is not part of any list yet, and is flagged as
+ * KBASE_REG_FREE. No mapping is allocated yet.
+ *
+ * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC
+ *
+ */
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
+{
+ struct kbase_va_region *new_reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* zone argument should only contain zone related region flags */
+ KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
+
+ new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
+
+ if (!new_reg)
+ return NULL;
+
+ new_reg->cpu_alloc = NULL; /* no alloc bound yet */
+ new_reg->gpu_alloc = NULL; /* no alloc bound yet */
+ new_reg->kctx = kctx;
+ new_reg->flags = zone | KBASE_REG_FREE;
+
+ new_reg->flags |= KBASE_REG_GROWABLE;
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+
+ return new_reg;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
+
+/**
+ * @brief Free a region object.
+ *
+ * The described region must be freed of any mapping.
+ *
+ * If the region is not flagged as KBASE_REG_FREE, the region's
+ * alloc object will be released.
+ * It is a bug if no alloc object exists for non-free regions.
+ *
+ */
+void kbase_free_alloced_region(struct kbase_va_region *reg)
+{
+ if (!(reg->flags & KBASE_REG_FREE)) {
+ /*
+ * The physical allocation should have been removed from the
+ * eviction list before this function is called. However, in the
+ * case of abnormal process termination or the app leaking the
+ * memory, kbase_mem_free_region is not called, so it can still be
+ * on the list when the region tracker is terminated.
+ */
+ if (!list_empty(&reg->gpu_alloc->evict_node)) {
+ /*
+ * Unlink the physical allocation before unmaking it
+ * evictable so that the allocation isn't grown back to
+ * its last backed size as we're going to unmap it
+ * anyway.
+ */
+ reg->cpu_alloc->reg = NULL;
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ reg->gpu_alloc->reg = NULL;
+
+ /*
+ * If a region has been made evictable then we must
+ * unmake it before trying to free it.
+ * If the memory hasn't been reclaimed it will be
+ * unmapped and freed below, if it has been reclaimed
+ * then the operations below are no-ops.
+ */
+ if (reg->flags & KBASE_REG_DONT_NEED) {
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+ KBASE_MEM_TYPE_NATIVE);
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+ }
+
+ /*
+ * Remove the region from the sticky resource metadata
+ * list should it be there.
+ */
+ kbase_sticky_resource_release(reg->kctx, NULL,
+ reg->start_pfn << PAGE_SHIFT);
+
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ /* To detect use-after-free in debug builds */
+ KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+ }
+ kfree(reg);
+}
+
+KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
+
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)
+{
+ int err;
+ size_t i = 0;
+ unsigned long attr;
+ unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
+
+ if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
+ (reg->flags & KBASE_REG_SHARE_BOTH))
+ attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
+ else
+ attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
+ if (err)
+ return err;
+
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ u64 stride;
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = reg->gpu_alloc;
+ stride = alloc->imported.alias.stride;
+ KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
+ for (i = 0; i < alloc->imported.alias.nents; i++) {
+ if (alloc->imported.alias.aliased[i].alloc) {
+ err = kbase_mmu_insert_pages(kctx,
+ reg->start_pfn + (i * stride),
+ alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
+ alloc->imported.alias.aliased[i].length,
+ reg->flags);
+ if (err)
+ goto bad_insert;
+
+ kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+ } else {
+ err = kbase_mmu_insert_single_page(kctx,
+ reg->start_pfn + i * stride,
+ kctx->aliasing_sink_page,
+ alloc->imported.alias.aliased[i].length,
+ (reg->flags & mask) | attr);
+
+ if (err)
+ goto bad_insert;
+ }
+ }
+ } else {
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ kbase_reg_current_backed_size(reg),
+ reg->flags);
+ if (err)
+ goto bad_insert;
+ kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
+ }
+
+ return err;
+
+bad_insert:
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ u64 stride;
+
+ stride = reg->gpu_alloc->imported.alias.stride;
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+ while (i--)
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
+ kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length);
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ }
+ }
+
+ kbase_remove_va_region(kctx, reg);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc, bool writeable);
+
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int err;
+
+ if (reg->start_pfn == 0)
+ return 0;
+
+ if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ size_t i;
+
+ err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+ for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ } else {
+ err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+ }
+
+ if (reg->gpu_alloc && reg->gpu_alloc->type ==
+ KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+ struct kbase_alloc_import_user_buf *user_buf =
+ &reg->gpu_alloc->imported.user_buf;
+
+ if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
+ user_buf->current_mapping_usage_count &=
+ ~PINNED_ON_IMPORT;
+
+ kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
+ (reg->flags & KBASE_REG_GPU_WR));
+ }
+ }
+
+ if (err)
+ return err;
+
+ err = kbase_remove_va_region(kctx, reg);
+ return err;
+}
+
+static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
+ struct kbase_context *kctx,
+ unsigned long uaddr, size_t size, u64 *offset)
+{
+ struct vm_area_struct *vma;
+ struct kbase_cpu_mapping *map;
+ unsigned long vm_pgoff_in_region;
+ unsigned long vm_off_in_region;
+ unsigned long map_start;
+ size_t map_size;
+
+ lockdep_assert_held(&current->mm->mmap_sem);
+
+ if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+ return NULL;
+
+ vma = find_vma_intersection(current->mm, uaddr, uaddr+size);
+
+ if (!vma || vma->vm_start > uaddr)
+ return NULL;
+ if (vma->vm_ops != &kbase_vm_ops)
+ /* Not ours! */
+ return NULL;
+
+ map = vma->vm_private_data;
+
+ if (map->kctx != kctx)
+ /* Not from this context! */
+ return NULL;
+
+ vm_pgoff_in_region = vma->vm_pgoff - map->region->start_pfn;
+ vm_off_in_region = vm_pgoff_in_region << PAGE_SHIFT;
+ map_start = vma->vm_start - vm_off_in_region;
+ map_size = map->region->nr_pages << PAGE_SHIFT;
+
+ if ((uaddr + size) > (map_start + map_size))
+ /* Not within the CPU mapping */
+ return NULL;
+
+ *offset = (uaddr - vma->vm_start) + vm_off_in_region;
+
+ return map;
+}
+
+int kbasep_find_enclosing_cpu_mapping_offset(
+ struct kbase_context *kctx,
+ unsigned long uaddr, size_t size, u64 *offset)
+{
+ struct kbase_cpu_mapping *map;
+
+ kbase_os_mem_map_lock(kctx);
+
+ map = kbasep_find_enclosing_cpu_mapping(kctx, uaddr, size, offset);
+
+ kbase_os_mem_map_unlock(kctx);
+
+ if (!map)
+ return -EINVAL;
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset);
+
+void kbase_sync_single(struct kbase_context *kctx,
+ struct tagged_addr t_cpu_pa, struct tagged_addr t_gpu_pa,
+ off_t offset, size_t size, enum kbase_sync_type sync_fn)
+{
+ struct page *cpu_page;
+ phys_addr_t cpu_pa = as_phys_addr_t(t_cpu_pa);
+ phys_addr_t gpu_pa = as_phys_addr_t(t_gpu_pa);
+
+ cpu_page = pfn_to_page(PFN_DOWN(cpu_pa));
+
+ if (likely(cpu_pa == gpu_pa)) {
+ dma_addr_t dma_addr;
+
+ BUG_ON(!cpu_page);
+ BUG_ON(offset + size > PAGE_SIZE);
+
+ dma_addr = kbase_dma_addr(cpu_page) + offset;
+ if (sync_fn == KBASE_SYNC_TO_CPU)
+ dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
+ size, DMA_BIDIRECTIONAL);
+ else if (sync_fn == KBASE_SYNC_TO_DEVICE)
+ dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
+ size, DMA_BIDIRECTIONAL);
+ } else {
+ void *src = NULL;
+ void *dst = NULL;
+ struct page *gpu_page;
+
+ if (WARN(!gpu_pa, "No GPU PA found for infinite cache op"))
+ return;
+
+ gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
+
+ if (sync_fn == KBASE_SYNC_TO_DEVICE) {
+ src = ((unsigned char *)kmap(cpu_page)) + offset;
+ dst = ((unsigned char *)kmap(gpu_page)) + offset;
+ } else if (sync_fn == KBASE_SYNC_TO_CPU) {
+ dma_sync_single_for_cpu(kctx->kbdev->dev,
+ kbase_dma_addr(gpu_page) + offset,
+ size, DMA_BIDIRECTIONAL);
+ src = ((unsigned char *)kmap(gpu_page)) + offset;
+ dst = ((unsigned char *)kmap(cpu_page)) + offset;
+ }
+ memcpy(dst, src, size);
+ kunmap(gpu_page);
+ kunmap(cpu_page);
+ if (sync_fn == KBASE_SYNC_TO_DEVICE)
+ dma_sync_single_for_device(kctx->kbdev->dev,
+ kbase_dma_addr(gpu_page) + offset,
+ size, DMA_BIDIRECTIONAL);
+ }
+}
+
+static int kbase_do_syncset(struct kbase_context *kctx,
+ struct basep_syncset *sset, enum kbase_sync_type sync_fn)
+{
+ int err = 0;
+ struct kbase_va_region *reg;
+ struct kbase_cpu_mapping *map;
+ unsigned long start;
+ size_t size;
+ struct tagged_addr *cpu_pa;
+ struct tagged_addr *gpu_pa;
+ u64 page_off, page_count;
+ u64 i;
+ u64 offset;
+
+ kbase_os_mem_map_lock(kctx);
+ kbase_gpu_vm_lock(kctx);
+
+ /* find the region where the virtual address is contained */
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+ sset->mem_handle.basep.handle);
+ if (!reg) {
+ dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
+ sset->mem_handle.basep.handle);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED) ||
+ kbase_mem_is_imported(reg->gpu_alloc->type))
+ goto out_unlock;
+
+ start = (uintptr_t)sset->user_addr;
+ size = (size_t)sset->size;
+
+ map = kbasep_find_enclosing_cpu_mapping(kctx, start, size, &offset);
+ if (!map) {
+ dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
+ start, sset->mem_handle.basep.handle);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
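+	/*
+	 * 'offset' is a byte offset into the region; split it into a page
+	 * index (page_off) and an in-page byte offset, rounding the byte
+	 * range up so that partially covered first/last pages are included
+	 * in page_count.
+	 */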
+ page_off = offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+ page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ cpu_pa = kbase_get_cpu_phy_pages(reg);
+ gpu_pa = kbase_get_gpu_phy_pages(reg);
+
+ if (page_off > reg->nr_pages ||
+ page_off + page_count > reg->nr_pages) {
+ /* Sync overflows the region */
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Sync first page */
+ if (as_phys_addr_t(cpu_pa[page_off])) {
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+
+ kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
+ offset, sz, sync_fn);
+ }
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ /* we grow upwards, so bail on first non-present page */
+ if (!as_phys_addr_t(cpu_pa[page_off + i]))
+ break;
+
+ kbase_sync_single(kctx, cpu_pa[page_off + i],
+ gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1 &&
+ as_phys_addr_t(cpu_pa[page_off + page_count - 1])) {
+ size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
+
+ kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
+ gpu_pa[page_off + page_count - 1], 0, sz,
+ sync_fn);
+ }
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_os_mem_map_unlock(kctx);
+ return err;
+}
+
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset)
+{
+ int err = -EINVAL;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(sset != NULL);
+
+ if (sset->mem_handle.basep.handle & ~PAGE_MASK) {
+ dev_warn(kctx->kbdev->dev,
+ "mem_handle: passed parameter is invalid");
+ return -EINVAL;
+ }
+
+ switch (sset->type) {
+ case BASE_SYNCSET_OP_MSYNC:
+ err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_DEVICE);
+ break;
+
+ case BASE_SYNCSET_OP_CSYNC:
+ err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_CPU);
+ break;
+
+ default:
+ dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type);
+ break;
+ }
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_sync_now);
+
+/* vm lock must be held */
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Unlink the physical allocation before unmaking it evictable so
+ * that the allocation isn't grown back to its last backed size
+ * as we're going to unmap it anyway.
+ */
+ reg->cpu_alloc->reg = NULL;
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ reg->gpu_alloc->reg = NULL;
+
+ /*
+ * If a region has been made evictable then we must unmake it
+ * before trying to free it.
+ * If the memory hasn't been reclaimed it will be unmapped and freed
+ * below, if it has been reclaimed then the operations below are no-ops.
+ */
+ if (reg->flags & KBASE_REG_DONT_NEED) {
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+ KBASE_MEM_TYPE_NATIVE);
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+
+ err = kbase_gpu_munmap(kctx, reg);
+ if (err) {
+ dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n");
+ goto out;
+ }
+
+ /* This will also free the physical pages */
+ kbase_free_alloced_region(reg);
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free_region);
+
+/**
+ * @brief Free the region from the GPU and unregister it.
+ *
+ * This function implements the free operation on a memory segment.
+ * It will loudly fail if called with outstanding mappings.
+ */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
+{
+ int err = 0;
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) {
+ dev_warn(kctx->kbdev->dev, "kbase_mem_free: gpu_addr parameter is invalid");
+ return -EINVAL;
+ }
+
+ if (0 == gpu_addr) {
+ dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
+ return -EINVAL;
+ }
+ kbase_gpu_vm_lock(kctx);
+
+ if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
+ gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
+ int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* ask to unlink the cookie as we'll free it */
+
+ kctx->pending_regions[cookie] = NULL;
+ kctx->cookies |= (1UL << cookie);
+
+ kbase_free_alloced_region(reg);
+ } else {
+ /* A real GPU va */
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE)) {
+ dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
+ gpu_addr);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
+ /* SAME_VA must be freed through munmap */
+ dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
+ gpu_addr);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ err = kbase_mem_free_region(kctx, reg);
+ }
+
+ out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free);
+
+int kbase_update_region_flags(struct kbase_context *kctx,
+ struct kbase_va_region *reg, unsigned long flags)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+
+ reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
+ /* all memory is now growable */
+ reg->flags |= KBASE_REG_GROWABLE;
+
+ if (flags & BASE_MEM_GROW_ON_GPF)
+ reg->flags |= KBASE_REG_PF_GROW;
+
+ if (flags & BASE_MEM_PROT_CPU_WR)
+ reg->flags |= KBASE_REG_CPU_WR;
+
+ if (flags & BASE_MEM_PROT_CPU_RD)
+ reg->flags |= KBASE_REG_CPU_RD;
+
+ if (flags & BASE_MEM_PROT_GPU_WR)
+ reg->flags |= KBASE_REG_GPU_WR;
+
+ if (flags & BASE_MEM_PROT_GPU_RD)
+ reg->flags |= KBASE_REG_GPU_RD;
+
+ if (0 == (flags & BASE_MEM_PROT_GPU_EX))
+ reg->flags |= KBASE_REG_GPU_NX;
+
+ if (!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED)
+ return -EINVAL;
+ } else if (flags & (BASE_MEM_COHERENT_SYSTEM |
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+ reg->flags |= KBASE_REG_SHARE_BOTH;
+ }
+
+ if (!(reg->flags & KBASE_REG_SHARE_BOTH) &&
+ flags & BASE_MEM_COHERENT_LOCAL) {
+ reg->flags |= KBASE_REG_SHARE_IN;
+ }
+
+ /* Set up default MEMATTR usage */
+ if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
+ (reg->flags & KBASE_REG_SHARE_BOTH)) {
+ reg->flags |=
+ KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
+ } else {
+ reg->flags |=
+ KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
+ }
+
+ return 0;
+}
+
+int kbase_alloc_phy_pages_helper(
+ struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_requested)
+{
+ int new_page_count __maybe_unused;
+ size_t old_page_count = alloc->nents;
+ size_t nr_left = nr_pages_requested;
+ int res;
+ struct kbase_context *kctx;
+ struct tagged_addr *tp;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+
+ kctx = alloc->imported.kctx;
+
+ if (nr_pages_requested == 0)
+ goto done; /*nothing to do*/
+
+ new_page_count = kbase_atomic_add_pages(
+ nr_pages_requested, &kctx->used_pages);
+ kbase_atomic_add_pages(nr_pages_requested,
+ &kctx->kbdev->memdev.used_pages);
+
+ /* Increase mm counters before we allocate pages so that this
+ * allocation is visible to the OOM killer */
+ kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
+ tp = alloc->pages + old_page_count;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Check if enough pages were requested so that we can allocate a
+	 * large page (512 * 4KB = 2MB)
+	 */
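+	/*
+	 * For example, a request for 1000 pages first takes one 512-page
+	 * (2MB) block here; the remaining 488 pages are sub-allocated from
+	 * partially used 2MB blocks on mem_partials or from a freshly
+	 * allocated 2MB block, and anything still left falls back to the
+	 * 4KB pool further below.
+	 */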
+ if (nr_left >= (SZ_2M / SZ_4K)) {
+ int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+ res = kbase_mem_pool_alloc_pages(&kctx->lp_mem_pool,
+ nr_lp * (SZ_2M / SZ_4K),
+ tp,
+ true);
+
+ if (res > 0) {
+ nr_left -= res;
+ tp += res;
+ }
+
+ if (nr_left) {
+ struct kbase_sub_alloc *sa, *temp_sa;
+
+ mutex_lock(&kctx->mem_partials_lock);
+
+ list_for_each_entry_safe(sa, temp_sa,
+ &kctx->mem_partials, link) {
+ int pidx = 0;
+
+ while (nr_left) {
+ pidx = find_next_zero_bit(sa->sub_pages,
+ SZ_2M / SZ_4K,
+ pidx);
+ bitmap_set(sa->sub_pages, pidx, 1);
+ *tp++ = as_tagged_tag(page_to_phys(sa->page +
+ pidx),
+ FROM_PARTIAL);
+ nr_left--;
+
+ if (bitmap_full(sa->sub_pages, SZ_2M / SZ_4K)) {
+ /* unlink from partial list when full */
+ list_del_init(&sa->link);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&kctx->mem_partials_lock);
+ }
+
+		/* Only if we actually have a chunk of fewer than 512 pages left;
+		 * if more are left it indicates that we couldn't allocate a 2MB
+		 * page above, so there is no point retrying here.
+		 */
+ if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+ /* create a new partial and suballocate the rest from it */
+ struct page *np = NULL;
+
+ do {
+ int err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1);
+
+ if (err)
+ break;
+ np = kbase_mem_pool_alloc(&kctx->lp_mem_pool);
+ } while (!np);
+
+ if (np) {
+ int i;
+ struct kbase_sub_alloc *sa;
+ struct page *p;
+
+ sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+ if (!sa) {
+ kbase_mem_pool_free(&kctx->lp_mem_pool, np, false);
+ goto no_new_partial;
+ }
+
+ /* store pointers back to the control struct */
+ np->lru.next = (void *)sa;
+ for (p = np; p < np + SZ_2M / SZ_4K; p++)
+ p->lru.prev = (void *)np;
+ INIT_LIST_HEAD(&sa->link);
+ bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+ sa->page = np;
+
+ for (i = 0; i < nr_left; i++)
+ *tp++ = as_tagged_tag(page_to_phys(np + i), FROM_PARTIAL);
+
+ bitmap_set(sa->sub_pages, 0, nr_left);
+ nr_left = 0;
+
+ /* expose for later use */
+ mutex_lock(&kctx->mem_partials_lock);
+ list_add(&sa->link, &kctx->mem_partials);
+ mutex_unlock(&kctx->mem_partials_lock);
+ }
+ }
+ }
+no_new_partial:
+#endif
+
+ if (nr_left) {
+ res = kbase_mem_pool_alloc_pages(&kctx->mem_pool,
+ nr_left,
+ tp,
+ false);
+ if (res <= 0)
+ goto alloc_failed;
+ }
+
+	/*
+	 * Request a zone cache update; this scans only the new pages and
+	 * appends their information to the zone cache. If the update
+	 * fails then clear the cache so we fall back to doing things
+	 * page by page.
+	 */
+ if (kbase_zone_cache_update(alloc, old_page_count) != 0)
+ kbase_zone_cache_clear(alloc);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+
+ alloc->nents += nr_pages_requested;
+done:
+ return 0;
+
+alloc_failed:
+ /* rollback needed if got one or more 2MB but failed later */
+ if (nr_left != nr_pages_requested)
+ kbase_mem_pool_free_pages(&kctx->lp_mem_pool,
+ nr_pages_requested - nr_left,
+ alloc->pages + old_page_count,
+ false,
+ false);
+
+ kbase_process_page_usage_dec(kctx, nr_pages_requested);
+ kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages);
+ kbase_atomic_sub_pages(nr_pages_requested,
+ &kctx->kbdev->memdev.used_pages);
+
+ return -ENOMEM;
+}
+
+static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
+{
+ struct page *p, *head_page;
+ struct kbase_sub_alloc *sa;
+
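+	/*
+	 * Recover the bookkeeping set up when the 2MB block was carved up:
+	 * each sub-page's lru.prev points back at the 2MB head page, and the
+	 * head page's lru.next points at the kbase_sub_alloc tracking it.
+	 */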
+ p = phys_to_page(as_phys_addr_t(tp));
+ head_page = (struct page *)p->lru.prev;
+ sa = (struct kbase_sub_alloc *)head_page->lru.next;
+ mutex_lock(&kctx->mem_partials_lock);
+ clear_bit(p - head_page, sa->sub_pages);
+ if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+ list_del(&sa->link);
+ kbase_mem_pool_free(&kctx->lp_mem_pool, head_page, true);
+ kfree(sa);
+ } else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+ SZ_2M / SZ_4K - 1) {
+ /* expose the partial again */
+ list_add(&sa->link, &kctx->mem_partials);
+ }
+ mutex_unlock(&kctx->mem_partials_lock);
+}
+
+int kbase_free_phy_pages_helper(
+ struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_to_free)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ bool syncback;
+ bool reclaimed = (alloc->evicted != 0);
+ struct tagged_addr *start_free;
+ int new_page_count __maybe_unused;
+ size_t freed = 0;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+ KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+ /* early out if nothing to do */
+ if (0 == nr_pages_to_free)
+ return 0;
+
+ start_free = alloc->pages + alloc->nents - nr_pages_to_free;
+
+ syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	/* Pad start_free to a valid start location: freeing must not begin
+	 * in the middle of a huge (2MB) entry, so skip over any tail pages
+	 * of a huge allocation at the start of the range.
+	 */
+ while (nr_pages_to_free && is_huge(*start_free) &&
+ !is_huge_head(*start_free)) {
+ nr_pages_to_free--;
+ start_free++;
+ }
+
+	/*
+	 * Clear the zone cache; we don't expect JIT allocations to be
+	 * shrunk in parts, so there is no point trying to optimize for that
+	 * by scanning for the changes caused by freeing this memory and
+	 * updating the existing cache entries.
+	 */
+ kbase_zone_cache_clear(alloc);
+
+
+ while (nr_pages_to_free) {
+ if (is_huge_head(*start_free)) {
+ /* This is a 2MB entry, so free all the 512 pages that
+ * it points to
+ */
+ kbase_mem_pool_free_pages(&kctx->lp_mem_pool,
+ 512,
+ start_free,
+ syncback,
+ reclaimed);
+ nr_pages_to_free -= 512;
+ start_free += 512;
+ freed += 512;
+ } else if (is_partial(*start_free)) {
+ free_partial(kctx, *start_free);
+ nr_pages_to_free--;
+ start_free++;
+ freed++;
+ } else {
+ struct tagged_addr *local_end_free;
+
+ local_end_free = start_free;
+ while (nr_pages_to_free &&
+ !is_huge(*local_end_free) &&
+ !is_partial(*local_end_free)) {
+ local_end_free++;
+ nr_pages_to_free--;
+ }
+ kbase_mem_pool_free_pages(&kctx->mem_pool,
+ local_end_free - start_free,
+ start_free,
+ syncback,
+ reclaimed);
+ freed += local_end_free - start_free;
+ start_free += local_end_free - start_free;
+ }
+ }
+
+ alloc->nents -= freed;
+
+ /*
+ * If the allocation was not evicted (i.e. evicted == 0) then
+ * the page accounting needs to be done.
+ */
+ if (!reclaimed) {
+ kbase_process_page_usage_dec(kctx, freed);
+ new_page_count = kbase_atomic_sub_pages(freed,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(freed,
+ &kctx->kbdev->memdev.used_pages);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+ }
+
+ return 0;
+}
+
+void kbase_mem_kref_free(struct kref *kref)
+{
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
+
+ switch (alloc->type) {
+ case KBASE_MEM_TYPE_NATIVE: {
+ WARN_ON(!alloc->imported.kctx);
+ /*
+ * The physical allocation must have been removed from the
+ * eviction list before trying to free it.
+ */
+ WARN_ON(!list_empty(&alloc->evict_node));
+ kbase_free_phy_pages_helper(alloc, alloc->nents);
+ break;
+ }
+ case KBASE_MEM_TYPE_ALIAS: {
+ /* just call put on the underlying phy allocs */
+ size_t i;
+ struct kbase_aliased *aliased;
+
+ aliased = alloc->imported.alias.aliased;
+ if (aliased) {
+ for (i = 0; i < alloc->imported.alias.nents; i++)
+ if (aliased[i].alloc)
+ kbase_mem_phy_alloc_put(aliased[i].alloc);
+ vfree(aliased);
+ }
+ break;
+ }
+ case KBASE_MEM_TYPE_RAW:
+ /* raw pages, external cleanup */
+ break;
+#ifdef CONFIG_UMP
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ ump_dd_release(alloc->imported.ump_handle);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
+ dma_buf_detach(alloc->imported.umm.dma_buf,
+ alloc->imported.umm.dma_attachment);
+ dma_buf_put(alloc->imported.umm.dma_buf);
+ break;
+#endif
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ if (alloc->imported.user_buf.mm)
+ mmdrop(alloc->imported.user_buf.mm);
+ kfree(alloc->imported.user_buf.pages);
+ break;
+ case KBASE_MEM_TYPE_TB:{
+ void *tb;
+
+ tb = alloc->imported.kctx->jctx.tb;
+ kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
+ vfree(tb);
+ break;
+ }
+ default:
+		WARN(1, "Unexpected free of type %d\n", alloc->type);
+ break;
+ }
+
+ /* Free based on allocation type */
+ if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+ vfree(alloc);
+ else
+ kfree(alloc);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(vsize > 0);
+
+ /* validate user provided arguments */
+ if (size > vsize || vsize > reg->nr_pages)
+ goto out_term;
+
+ /* Prevent vsize*sizeof from wrapping around.
+ * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
+ */
+ if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+ goto out_term;
+
+ KBASE_DEBUG_ASSERT(0 != vsize);
+
+ if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
+ goto out_term;
+
+ reg->cpu_alloc->reg = reg;
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
+ goto out_rollback;
+ reg->gpu_alloc->reg = reg;
+ }
+
+ return 0;
+
+out_rollback:
+ kbase_free_phy_pages_helper(reg->cpu_alloc, size);
+out_term:
+ return -1;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages);
+
+bool kbase_check_alloc_flags(unsigned long flags)
+{
+ /* Only known input flags should be set. */
+ if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+ return false;
+
+ /* At least one flag should be set */
+ if (flags == 0)
+ return false;
+
+ /* Either the GPU or CPU must be reading from the allocated memory */
+ if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0)
+ return false;
+
+ /* Either the GPU or CPU must be writing to the allocated memory */
+ if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* GPU cannot be writing to GPU executable memory and cannot grow the memory on page fault. */
+ if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF)))
+ return false;
+
+	/* The GPU should have at least read or write access, otherwise there
+	   is no reason for allocating. */
+ if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* BASE_MEM_IMPORT_SHARED is only valid for imported memory */
+ if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
+ return false;
+
+ return true;
+}
+
+bool kbase_check_import_flags(unsigned long flags)
+{
+ /* Only known input flags should be set. */
+ if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+ return false;
+
+ /* At least one flag should be set */
+ if (flags == 0)
+ return false;
+
+ /* Imported memory cannot be GPU executable */
+ if (flags & BASE_MEM_PROT_GPU_EX)
+ return false;
+
+ /* Imported memory cannot grow on page fault */
+ if (flags & BASE_MEM_GROW_ON_GPF)
+ return false;
+
+	/* The GPU should have at least read or write access, otherwise there
+	   is no reason for importing. */
+ if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* Secure memory cannot be read by the CPU */
+ if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD))
+ return false;
+
+ return true;
+}
+
+/**
+ * @brief Acquire the per-context region list lock
+ */
+void kbase_gpu_vm_lock(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ mutex_lock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock);
+
+/**
+ * @brief Release the per-context region list lock
+ */
+void kbase_gpu_vm_unlock(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ mutex_unlock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock);
+
+#ifdef CONFIG_DEBUG_FS
+struct kbase_jit_debugfs_data {
+ int (*func)(struct kbase_jit_debugfs_data *);
+ struct mutex lock;
+ struct kbase_context *kctx;
+ u64 active_value;
+ u64 pool_value;
+ u64 destroy_value;
+ char buffer[50];
+};
+
+static int kbase_jit_debugfs_common_open(struct inode *inode,
+ struct file *file, int (*func)(struct kbase_jit_debugfs_data *))
+{
+ struct kbase_jit_debugfs_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->func = func;
+ mutex_init(&data->lock);
+ data->kctx = (struct kbase_context *) inode->i_private;
+
+ file->private_data = data;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t kbase_jit_debugfs_common_read(struct file *file,
+ char __user *buf, size_t len, loff_t *ppos)
+{
+ struct kbase_jit_debugfs_data *data;
+ size_t size;
+ int ret;
+
+ data = (struct kbase_jit_debugfs_data *) file->private_data;
+ mutex_lock(&data->lock);
+
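+	/*
+	 * Only regenerate the statistics on the first read (*ppos == 0);
+	 * subsequent reads of the same open file serve the cached buffer so
+	 * the three values stay consistent with each other.
+	 */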
+ if (*ppos) {
+ size = strnlen(data->buffer, sizeof(data->buffer));
+ } else {
+ if (!data->func) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data->func(data)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ size = scnprintf(data->buffer, sizeof(data->buffer),
+ "%llu,%llu,%llu", data->active_value,
+ data->pool_value, data->destroy_value);
+ }
+
+ ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size);
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int kbase_jit_debugfs_common_release(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
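+/*
+ * Declare a read-only debugfs file whose contents are produced by __func:
+ * this generates an open wrapper that binds __func and a file_operations
+ * struct wired to the common open/read/release handlers above.
+ */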
+#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ return kbase_jit_debugfs_common_open(inode, file, __func); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = kbase_jit_debugfs_common_release, \
+ .read = kbase_jit_debugfs_common_read, \
+ .write = NULL, \
+ .llseek = generic_file_llseek, \
+}
+
+static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct list_head *tmp;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_for_each(tmp, &kctx->jit_active_head) {
+ data->active_value++;
+ }
+
+ list_for_each(tmp, &kctx->jit_pool_head) {
+ data->pool_value++;
+ }
+
+ list_for_each(tmp, &kctx->jit_destroy_head) {
+ data->destroy_value++;
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops,
+ kbase_jit_debugfs_count_get);
+
+static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct kbase_va_region *reg;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+ data->active_value += reg->nr_pages;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+ data->pool_value += reg->nr_pages;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+ data->destroy_value += reg->nr_pages;
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops,
+ kbase_jit_debugfs_vm_get);
+
+static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct kbase_va_region *reg;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+ data->active_value += reg->gpu_alloc->nents;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+ data->pool_value += reg->gpu_alloc->nents;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+ data->destroy_value += reg->gpu_alloc->nents;
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
+ kbase_jit_debugfs_phys_get);
+
+void kbase_jit_debugfs_init(struct kbase_context *kctx)
+{
+ /* Debugfs entry for getting the number of JIT allocations. */
+ debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_count_fops);
+
+ /*
+ * Debugfs entry for getting the total number of virtual pages
+ * used by JIT allocations.
+ */
+ debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_vm_fops);
+
+ /*
+ * Debugfs entry for getting the number of physical pages used
+ * by JIT allocations.
+ */
+ debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_phys_fops);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations
+ * @work: Work item
+ *
+ * This function does the work of freeing JIT allocations whose physical
+ * backing has been released.
+ */
+static void kbase_jit_destroy_worker(struct work_struct *work)
+{
+ struct kbase_context *kctx;
+ struct kbase_va_region *reg;
+
+ kctx = container_of(work, struct kbase_context, jit_work);
+ do {
+ mutex_lock(&kctx->jit_evict_lock);
+ if (list_empty(&kctx->jit_destroy_head)) {
+ mutex_unlock(&kctx->jit_evict_lock);
+ break;
+ }
+
+ reg = list_first_entry(&kctx->jit_destroy_head,
+ struct kbase_va_region, jit_node);
+
+ list_del(&reg->jit_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_free_region(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+ } while (1);
+}
+
+int kbase_jit_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->jit_active_head);
+ INIT_LIST_HEAD(&kctx->jit_pool_head);
+ INIT_LIST_HEAD(&kctx->jit_destroy_head);
+ INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
+
+ INIT_LIST_HEAD(&kctx->jit_pending_alloc);
+ INIT_LIST_HEAD(&kctx->jit_atoms_head);
+
+ return 0;
+}
+
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info)
+{
+ struct kbase_va_region *reg = NULL;
+ struct kbase_va_region *walker;
+ struct kbase_va_region *temp;
+ size_t current_diff = SIZE_MAX;
+
+ int ret;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ /*
+ * Scan the pool for an existing allocation which meets our
+ * requirements and remove it.
+ */
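+	/*
+	 * A pooled region qualifies if its VA span is large enough; among
+	 * qualifying regions, prefer the one whose current physical backing
+	 * (nents) is closest to the requested commit_pages, i.e. the
+	 * smallest |nents - commit_pages|.
+	 */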
+ list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head, jit_node) {
+
+ if (walker->nr_pages >= info->va_pages) {
+ size_t min_size, max_size, diff;
+
+			/*
+			 * The JIT allocation's VA requirements have been
+			 * met; it's suitable, but another allocation
+			 * might be a better fit.
+			 */
+ min_size = min_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ max_size = max_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ diff = max_size - min_size;
+
+ if (current_diff > diff) {
+ current_diff = diff;
+ reg = walker;
+ }
+
+ /* The allocation is an exact match, stop looking */
+ if (current_diff == 0)
+ break;
+ }
+ }
+
+ if (reg) {
+ /*
+ * Remove the found region from the pool and add it to the
+ * active list.
+ */
+ list_move(&reg->jit_node, &kctx->jit_active_head);
+
+ /*
+ * Remove the allocation from the eviction list as it's no
+ * longer eligible for eviction. This must be done before
+ * dropping the jit_evict_lock
+ */
+ list_del_init(&reg->gpu_alloc->evict_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Make the physical backing no longer reclaimable */
+ if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+ goto update_failed;
+
+ /* Grow the backing if required */
+ if (reg->gpu_alloc->nents < info->commit_pages) {
+ size_t delta;
+ size_t old_size = reg->gpu_alloc->nents;
+
+ /* Allocate some more pages */
+ delta = info->commit_pages - reg->gpu_alloc->nents;
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, delta)
+ != 0)
+ goto update_failed;
+
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ reg->cpu_alloc, delta) != 0) {
+ kbase_free_phy_pages_helper(
+ reg->gpu_alloc, delta);
+ goto update_failed;
+ }
+ }
+
+ ret = kbase_mem_grow_gpu_mapping(kctx, reg,
+ info->commit_pages, old_size);
+ /*
+ * The grow failed so put the allocation back in the
+ * pool and return failure.
+ */
+ if (ret)
+ goto update_failed;
+ }
+ kbase_gpu_vm_unlock(kctx);
+ } else {
+ /* No suitable JIT allocation was found so create a new one */
+ u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+ BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+ BASE_MEM_COHERENT_LOCAL;
+ u64 gpu_addr;
+
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
+ info->extent, &flags, &gpu_addr);
+ if (!reg)
+ goto out_unlocked;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_add(&reg->jit_node, &kctx->jit_active_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+ }
+
+ return reg;
+
+update_failed:
+	/*
+	 * An update to an allocation from the pool failed; chances
+	 * are slim that a new allocation would fare any better, so return
+	 * the allocation to the pool and fail.
+	 */
+ kbase_gpu_vm_unlock(kctx);
+ mutex_lock(&kctx->jit_evict_lock);
+ list_move(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+out_unlocked:
+ return NULL;
+}
+
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ /* The physical backing of memory in the pool is always reclaimable */
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_evictable_make(reg->gpu_alloc);
+ kbase_gpu_vm_unlock(kctx);
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_move(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+}
+
+void kbase_jit_backing_lost(struct kbase_va_region *reg)
+{
+ struct kbase_context *kctx = reg->kctx;
+
+ lockdep_assert_held(&kctx->jit_evict_lock);
+
+	/*
+	 * JIT allocations will always be on a list; if the region
+	 * is not on a list then it's not a JIT allocation.
+	 */
+ if (list_empty(&reg->jit_node))
+ return;
+
+ /*
+ * Freeing the allocation requires locks we might not be able
+ * to take now, so move the allocation to the free list and kick
+ * the worker which will do the freeing.
+ */
+ list_move(&reg->jit_node, &kctx->jit_destroy_head);
+
+ schedule_work(&kctx->jit_work);
+}
+
+bool kbase_jit_evict(struct kbase_context *kctx)
+{
+ struct kbase_va_region *reg = NULL;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Free the oldest allocation from the pool */
+ mutex_lock(&kctx->jit_evict_lock);
+ if (!list_empty(&kctx->jit_pool_head)) {
+ reg = list_entry(kctx->jit_pool_head.prev,
+ struct kbase_va_region, jit_node);
+ list_del(&reg->jit_node);
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ if (reg)
+ kbase_mem_free_region(kctx, reg);
+
+ return (reg != NULL);
+}
+
+void kbase_jit_term(struct kbase_context *kctx)
+{
+ struct kbase_va_region *walker;
+
+ /* Free all allocations for this context */
+
+ /*
+ * Flush the freeing of allocations whose backing has been freed
+ * (i.e. everything in jit_destroy_head).
+ */
+ cancel_work_sync(&kctx->jit_work);
+
+ kbase_gpu_vm_lock(kctx);
+ mutex_lock(&kctx->jit_evict_lock);
+ /* Free all allocations from the pool */
+ while (!list_empty(&kctx->jit_pool_head)) {
+ walker = list_first_entry(&kctx->jit_pool_head,
+ struct kbase_va_region, jit_node);
+ list_del(&walker->jit_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+ kbase_mem_free_region(kctx, walker);
+ mutex_lock(&kctx->jit_evict_lock);
+ }
+
+ /* Free all allocations from active list */
+ while (!list_empty(&kctx->jit_active_head)) {
+ walker = list_first_entry(&kctx->jit_active_head,
+ struct kbase_va_region, jit_node);
+ list_del(&walker->jit_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+ kbase_mem_free_region(kctx, walker);
+ mutex_lock(&kctx->jit_evict_lock);
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+ kbase_gpu_vm_unlock(kctx);
+}
+
+static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ long pinned_pages;
+ struct kbase_mem_phy_alloc *alloc;
+ struct page **pages;
+ struct tagged_addr *pa;
+ long i;
+ int err = -ENOMEM;
+ unsigned long address;
+ struct mm_struct *mm;
+ struct device *dev;
+ unsigned long offset;
+ unsigned long local_size;
+
+ alloc = reg->gpu_alloc;
+ pa = kbase_get_gpu_phy_pages(reg);
+ address = alloc->imported.user_buf.address;
+ mm = alloc->imported.user_buf.mm;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+
+ pages = alloc->imported.user_buf.pages;
+
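+	/*
+	 * The get_user_pages*() signature changed several times upstream, so
+	 * pick the variant matching the kernel being built against. In every
+	 * case the pages are only pinned for write if the region is GPU
+	 * writable.
+	 */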
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ pinned_pages = get_user_pages(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR,
+ 0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+ pinned_pages = get_user_pages_remote(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR,
+ 0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ pinned_pages = get_user_pages_remote(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
+#else
+ pinned_pages = get_user_pages_remote(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL, NULL);
+#endif
+
+ if (pinned_pages <= 0)
+ return pinned_pages;
+
+ if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+ for (i = 0; i < pinned_pages; i++)
+ put_page(pages[i]);
+ return -ENOMEM;
+ }
+
+ dev = kctx->kbdev->dev;
+ offset = address & ~PAGE_MASK;
+ local_size = alloc->imported.user_buf.size;
+
+ for (i = 0; i < pinned_pages; i++) {
+ dma_addr_t dma_addr;
+ unsigned long min;
+
+ min = MIN(PAGE_SIZE - offset, local_size);
+ dma_addr = dma_map_page(dev, pages[i],
+ offset, min,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr))
+ goto unwind;
+
+ alloc->imported.user_buf.dma_addrs[i] = dma_addr;
+ pa[i] = as_tagged(page_to_phys(pages[i]));
+
+ local_size -= min;
+ offset = 0;
+ }
+
+ alloc->nents = pinned_pages;
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa,
+ kbase_reg_current_backed_size(reg),
+ reg->flags);
+ if (err == 0)
+ return 0;
+
+ alloc->nents = 0;
+	/* fall through to the unwind path below */
+unwind:
+ while (i--) {
+ dma_unmap_page(kctx->kbdev->dev,
+ alloc->imported.user_buf.dma_addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ while (++i < pinned_pages) {
+ put_page(pages[i]);
+ pages[i] = NULL;
+ }
+
+ return err;
+}
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc, bool writeable)
+{
+ long i;
+ struct page **pages;
+ unsigned long size = alloc->imported.user_buf.size;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+ pages = alloc->imported.user_buf.pages;
+ for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
+ unsigned long local_size;
+ dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
+
+ local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
+ dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
+ DMA_BIDIRECTIONAL);
+ if (writeable)
+ set_page_dirty_lock(pages[i]);
+ put_page(pages[i]);
+ pages[i] = NULL;
+
+ size -= local_size;
+ }
+ alloc->nents = 0;
+}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static int kbase_jd_umm_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ struct sg_table *sgt;
+ struct scatterlist *s;
+ int i;
+ struct tagged_addr *pa;
+ int err;
+ size_t count = 0;
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = reg->gpu_alloc;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
+ KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt);
+ sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
+ DMA_BIDIRECTIONAL);
+
+ if (IS_ERR_OR_NULL(sgt))
+ return -EINVAL;
+
+ /* save for later */
+ alloc->imported.umm.sgt = sgt;
+
+ pa = kbase_get_gpu_phy_pages(reg);
+ KBASE_DEBUG_ASSERT(pa);
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ int j;
+ size_t pages = PFN_UP(sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
+ "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
+ sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
+ "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
+ (unsigned long long) sg_dma_address(s));
+
+ for (j = 0; (j < pages) && (count < reg->nr_pages); j++,
+ count++)
+ *pa++ = as_tagged(sg_dma_address(s) +
+ (j << PAGE_SHIFT));
+ WARN_ONCE(j < pages,
+ "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size);
+ }
+
+ if (!(reg->flags & KBASE_REG_IMPORT_PAD) &&
+ WARN_ONCE(count < reg->nr_pages,
+ "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size)) {
+ err = -EINVAL;
+ goto err_unmap_attachment;
+ }
+
+ /* Update nents as we now have pages to map */
+ alloc->nents = reg->nr_pages;
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ count,
+ reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
+ if (err)
+ goto err_unmap_attachment;
+
+ if (reg->flags & KBASE_REG_IMPORT_PAD) {
+ err = kbase_mmu_insert_single_page(kctx,
+ reg->start_pfn + count,
+ kctx->aliasing_sink_page,
+ reg->nr_pages - count,
+ (reg->flags | KBASE_REG_GPU_RD) &
+ ~KBASE_REG_GPU_WR);
+ if (err)
+ goto err_teardown_orig_pages;
+ }
+
+ return 0;
+
+err_teardown_orig_pages:
+ kbase_mmu_teardown_pages(kctx, reg->start_pfn, count);
+err_unmap_attachment:
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+
+ return err;
+}
+
+static void kbase_jd_umm_unmap(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(alloc);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+ alloc->nents = 0;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) \
+ || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
+static void add_kds_resource(struct kds_resource *kds_res,
+ struct kds_resource **kds_resources, u32 *kds_res_count,
+ unsigned long *kds_access_bitmap, bool exclusive)
+{
+ u32 i;
+
+ for (i = 0; i < *kds_res_count; i++) {
+ /* Duplicate resource, ignore */
+ if (kds_resources[i] == kds_res)
+ return;
+ }
+
+ kds_resources[*kds_res_count] = kds_res;
+ if (exclusive)
+ set_bit(*kds_res_count, kds_access_bitmap);
+ (*kds_res_count)++;
+}
+#endif
+
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+ struct kbase_context *kctx, struct kbase_va_region *reg,
+ struct mm_struct *locked_mm
+#ifdef CONFIG_KDS
+ , u32 *kds_res_count, struct kds_resource **kds_resources,
+ unsigned long *kds_access_bitmap, bool exclusive
+#endif
+ )
+{
+ int err;
+
+ /* decide what needs to happen for this resource */
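+	/*
+	 * User-buffer and dma-buf imports are only mapped on the GPU while
+	 * at least one user holds them: current_mapping_usage_count going
+	 * from 0 to 1 triggers the map below, and the matching unmap in
+	 * kbase_unmap_external_resource tears the mapping down when the
+	 * count drops back to 0.
+	 */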
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+ if (reg->gpu_alloc->imported.user_buf.mm != locked_mm)
+ goto exit;
+
+ reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
+ if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
+ err = kbase_jd_user_buf_map(kctx, reg);
+ if (err) {
+ reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
+ goto exit;
+ }
+ }
+ }
+ break;
+ case KBASE_MEM_TYPE_IMPORTED_UMP: {
+#if defined(CONFIG_KDS) && defined(CONFIG_UMP)
+ if (kds_res_count) {
+ struct kds_resource *kds_res;
+
+ kds_res = ump_dd_kds_resource_get(
+ reg->gpu_alloc->imported.ump_handle);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources,
+ kds_res_count,
+ kds_access_bitmap, exclusive);
+ }
+#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
+ break;
+ }
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ if (kds_res_count) {
+ struct kds_resource *kds_res;
+
+ kds_res = get_dma_buf_kds_resource(
+ reg->gpu_alloc->imported.umm.dma_buf);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources,
+ kds_res_count,
+ kds_access_bitmap, exclusive);
+ }
+#endif
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
+ if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+ err = kbase_jd_umm_map(kctx, reg);
+ if (err) {
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
+ goto exit;
+ }
+ }
+ break;
+ }
+#endif
+ default:
+ goto exit;
+ }
+
+ return kbase_mem_phy_alloc_get(reg->gpu_alloc);
+exit:
+ return NULL;
+}
+
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+ switch (alloc->type) {
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+ alloc->imported.umm.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.umm.current_mapping_usage_count) {
+ if (reg && reg->gpu_alloc == alloc) {
+ int err;
+
+ err = kbase_mmu_teardown_pages(
+ kctx,
+ reg->start_pfn,
+ alloc->nents);
+ WARN_ON(err);
+ }
+
+ kbase_jd_umm_unmap(kctx, alloc);
+ }
+ }
+ break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+ alloc->imported.user_buf.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
+ bool writeable = true;
+
+ if (reg && reg->gpu_alloc == alloc)
+ kbase_mmu_teardown_pages(
+ kctx,
+ reg->start_pfn,
+ kbase_reg_current_backed_size(reg));
+
+ if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
+ writeable = false;
+
+ kbase_jd_user_buf_unmap(kctx, alloc, writeable);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ kbase_mem_phy_alloc_put(alloc);
+}
+
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+ struct kbase_context *kctx, u64 gpu_addr)
+{
+ struct kbase_ctx_ext_res_meta *meta = NULL;
+ struct kbase_ctx_ext_res_meta *walker;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Walk the per context external resource metadata list for the
+ * metadata which matches the region which is being acquired.
+ */
+ list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
+ if (walker->gpu_addr == gpu_addr) {
+ meta = walker;
+ break;
+ }
+ }
+
+ /* No metadata exists so create one. */
+ if (!meta) {
+ struct kbase_va_region *reg;
+
+ /* Find the region */
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ kctx, gpu_addr);
+ if (NULL == reg || (reg->flags & KBASE_REG_FREE))
+ goto failed;
+
+ /* Allocate the metadata object */
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto failed;
+
+ /*
+ * Fill in the metadata object and acquire a reference
+ * for the physical resource.
+ */
+ meta->alloc = kbase_map_external_resource(kctx, reg, NULL
+#ifdef CONFIG_KDS
+ , NULL, NULL,
+ NULL, false
+#endif
+ );
+
+ if (!meta->alloc)
+ goto fail_map;
+
+ meta->gpu_addr = reg->start_pfn << PAGE_SHIFT;
+
+ list_add(&meta->ext_res_node, &kctx->ext_res_meta_head);
+ }
+
+ return meta;
+
+fail_map:
+ kfree(meta);
+failed:
+ return NULL;
+}
+
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+ struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+{
+ struct kbase_ctx_ext_res_meta *walker;
+ struct kbase_va_region *reg;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+	/* Search for the metadata if one isn't provided. */
+ if (!meta) {
+ /*
+ * Walk the per context external resource metadata list for the
+ * metadata which matches the region which is being released.
+ */
+ list_for_each_entry(walker, &kctx->ext_res_meta_head,
+ ext_res_node) {
+ if (walker->gpu_addr == gpu_addr) {
+ meta = walker;
+ break;
+ }
+ }
+ }
+
+ /* No metadata so just return. */
+ if (!meta)
+ return false;
+
+ /* Drop the physical memory reference and free the metadata. */
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ kctx,
+ meta->gpu_addr);
+
+ kbase_unmap_external_resource(kctx, reg, meta->alloc);
+ list_del(&meta->ext_res_node);
+ kfree(meta);
+
+ return true;
+}
+
+int kbase_sticky_resource_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->ext_res_meta_head);
+
+ return 0;
+}
+
+void kbase_sticky_resource_term(struct kbase_context *kctx)
+{
+ struct kbase_ctx_ext_res_meta *walker;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Free any sticky resources which haven't been unmapped.
+ *
+ * Note:
+ * We don't care about refcounts at this point as no future
+ * references to the meta data will be made.
+ * Region termination would find these if we didn't free them
+ * here, but it's more efficient if we do the clean up here.
+ */
+ while (!list_empty(&kctx->ext_res_meta_head)) {
+ walker = list_first_entry(&kctx->ext_res_meta_head,
+ struct kbase_ctx_ext_res_meta, ext_res_node);
+
+ kbase_sticky_resource_release(kctx, walker, 0);
+ }
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem.h b/drivers/gpu/arm_gpu/mali_kbase_mem.h
new file mode 100644
index 000000000000..e9a8d5dd6b94
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem.h
@@ -0,0 +1,1138 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem.h
+ * Base kernel memory APIs
+ */
+
+#ifndef _KBASE_MEM_H_
+#define _KBASE_MEM_H_
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/kref.h>
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_UMP
+#include <linux/ump.h>
+#endif /* CONFIG_UMP */
+#include "mali_base_kernel.h"
+#include <mali_kbase_hw.h>
+#include "mali_kbase_pm.h"
+#include "mali_kbase_defs.h"
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include "mali_kbase_gator.h"
+#endif
+/* Required for kbase_mem_evictable_unmake */
+#include "mali_kbase_mem_linux.h"
+
+/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
+
+/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
+The MMU reads in 8 page table entries from memory at a time; if we have more than one page fault within the same 8 pages and
+page tables are updated accordingly, the MMU does not re-read the page table entries from memory for the subsequent page table
+updates and generates duplicate page faults, as the page table information used by the MMU is not valid. */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */
+
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */
+
+/* This must always be a power of 2 */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
+/**
+ * A CPU mapping
+ */
+struct kbase_cpu_mapping {
+ struct list_head mappings_list;
+ struct kbase_mem_phy_alloc *alloc;
+ struct kbase_context *kctx;
+ struct kbase_va_region *region;
+ int count;
+ int free_on_close;
+};
+
+enum kbase_memory_type {
+ KBASE_MEM_TYPE_NATIVE,
+ KBASE_MEM_TYPE_IMPORTED_UMP,
+ KBASE_MEM_TYPE_IMPORTED_UMM,
+ KBASE_MEM_TYPE_IMPORTED_USER_BUF,
+ KBASE_MEM_TYPE_ALIAS,
+ KBASE_MEM_TYPE_TB,
+ KBASE_MEM_TYPE_RAW
+};
+
+/* internal structure, mirroring base_mem_aliasing_info,
+ * but with alloc instead of a gpu va (handle) */
+struct kbase_aliased {
+ struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
+ u64 offset; /* in pages */
+ u64 length; /* in pages */
+};
+
+/**
+ * @brief Physical pages tracking object properties
+ */
+#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0)
+#define KBASE_MEM_PHY_ALLOC_LARGE (1ul << 1)
+
+/* physical pages tracking object.
+ * Set up to track N pages.
+ * N not stored here, the creator holds that info.
+ * This object only tracks how many elements are actually valid (present).
+ * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not
+ * shared with another region or client. CPU mappings are OK to exist when changing, as
+ * long as the tracked mapping objects are updated as part of the change.
+ */
+struct kbase_mem_phy_alloc {
+ struct kref kref; /* number of users of this alloc */
+ atomic_t gpu_mappings;
+ size_t nents; /* 0..N */
+ struct tagged_addr *pages; /* N elements, only 0..nents are valid */
+
+ /* kbase_cpu_mappings */
+ struct list_head mappings;
+
+ /* Node used to store this allocation on the eviction list */
+ struct list_head evict_node;
+	/* Physical backing size when the pages were evicted */
+ size_t evicted;
+ /*
+ * Back reference to the region structure which created this
+ * allocation, or NULL if it has been freed.
+ */
+ struct kbase_va_region *reg;
+
+ /* type of buffer */
+ enum kbase_memory_type type;
+
+ unsigned long properties;
+
+ struct list_head zone_cache;
+
+ /* member in union valid based on @a type */
+ union {
+#ifdef CONFIG_UMP
+ ump_dd_handle ump_handle;
+#endif /* CONFIG_UMP */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct {
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ unsigned int current_mapping_usage_count;
+ struct sg_table *sgt;
+ } umm;
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+ struct {
+ u64 stride;
+ size_t nents;
+ struct kbase_aliased *aliased;
+ } alias;
+ /* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
+ struct kbase_context *kctx;
+ struct kbase_alloc_import_user_buf {
+ unsigned long address;
+ unsigned long size;
+ unsigned long nr_pages;
+ struct page **pages;
+ /* top bit (1<<31) of current_mapping_usage_count
+ * specifies that this import was pinned on import
+ * See PINNED_ON_IMPORT
+ */
+ u32 current_mapping_usage_count;
+ struct mm_struct *mm;
+ dma_addr_t *dma_addrs;
+ } user_buf;
+ } imported;
+};
+
+/* The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
+ * used to signify that a buffer was pinned when it was imported. Since the
+ * reference count is limited by the number of atoms that can be submitted at
+ * once there should be no danger of overflowing into this bit.
+ * Stealing the top bit also has the benefit that
+ * current_mapping_usage_count != 0 if and only if the buffer is mapped.
+ */
+#define PINNED_ON_IMPORT (1<<31)
+
+static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(alloc);
+ /* we only track mappings of NATIVE buffers */
+ if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+ atomic_inc(&alloc->gpu_mappings);
+}
+
+static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(alloc);
+ /* we only track mappings of NATIVE buffers */
+ if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+ if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
+ pr_err("Mismatched %s:\n", __func__);
+ dump_stack();
+ }
+}
+
+/**
+ * kbase_mem_is_imported - Indicate whether a memory type is imported
+ *
+ * @type: the memory type
+ *
+ * Return: true if the memory type is imported, false otherwise
+ */
+static inline bool kbase_mem_is_imported(enum kbase_memory_type type)
+{
+ return (type == KBASE_MEM_TYPE_IMPORTED_UMP) ||
+ (type == KBASE_MEM_TYPE_IMPORTED_UMM) ||
+ (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+}
+
+void kbase_mem_kref_free(struct kref *kref);
+
+int kbase_mem_init(struct kbase_device *kbdev);
+void kbase_mem_halt(struct kbase_device *kbdev);
+void kbase_mem_term(struct kbase_device *kbdev);
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
+{
+ kref_get(&alloc->kref);
+ return alloc;
+}
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
+{
+ kref_put(&alloc->kref, kbase_mem_kref_free);
+ return NULL;
+}
+
+/**
+ * A GPU memory region, and attributes for CPU mappings.
+ */
+struct kbase_va_region {
+ struct rb_node rblink;
+ struct list_head link;
+
+ struct kbase_context *kctx; /* Backlink to base context */
+
+ u64 start_pfn; /* The PFN in GPU space */
+ size_t nr_pages;
+
+/* Free region */
+#define KBASE_REG_FREE (1ul << 0)
+/* CPU write access */
+#define KBASE_REG_CPU_WR (1ul << 1)
+/* GPU write access */
+#define KBASE_REG_GPU_WR (1ul << 2)
+/* No eXecute flag */
+#define KBASE_REG_GPU_NX (1ul << 3)
+/* Is CPU cached? */
+#define KBASE_REG_CPU_CACHED (1ul << 4)
+/* Is GPU cached? */
+#define KBASE_REG_GPU_CACHED (1ul << 5)
+
+#define KBASE_REG_GROWABLE (1ul << 6)
+/* Can grow on pf? */
+#define KBASE_REG_PF_GROW (1ul << 7)
+
+/* VA managed by us */
+#define KBASE_REG_CUSTOM_VA (1ul << 8)
+
+/* inner shareable coherency */
+#define KBASE_REG_SHARE_IN (1ul << 9)
+/* inner & outer shareable coherency */
+#define KBASE_REG_SHARE_BOTH (1ul << 10)
+
+/* Space for 4 different zones */
+#define KBASE_REG_ZONE_MASK (3ul << 11)
+#define KBASE_REG_ZONE(x) (((x) & 3) << 11)
+
+/* GPU read access */
+#define KBASE_REG_GPU_RD (1ul<<13)
+/* CPU read access */
+#define KBASE_REG_CPU_RD (1ul<<14)
+
+/* Index of chosen MEMATTR for this region (0..7) */
+#define KBASE_REG_MEMATTR_MASK (7ul << 16)
+#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16)
+#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
+
+#define KBASE_REG_SECURE (1ul << 19)
+
+#define KBASE_REG_DONT_NEED (1ul << 20)
+
+/* Imported buffer is padded? */
+#define KBASE_REG_IMPORT_PAD (1ul << 21)
+
+#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
+
+/* only used with 32-bit clients */
+/*
+ * On a 32bit platform, custom VA should be wired from (4GB + shader region)
+ * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
+ * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
+ * So we put the default limit to the maximum possible on Linux and shrink
+ * it down, if required by the GPU, during initialization.
+ */
+
+/*
+ * Dedicated 16MB region for shader code:
+ * VA range 0x101000000-0x102000000
+ */
+#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1)
+#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT)
+
+#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2)
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
+/* end 32-bit clients only */
+
+ unsigned long flags;
+
+ size_t extent; /* nr of pages alloc'd on PF */
+
+ struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
+ struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */
+
+ /* non-NULL if this memory object is a kds_resource */
+ struct kds_resource *kds_res;
+
+ /* List head used to store the region in the JIT allocation pool */
+ struct list_head jit_node;
+};
+
+/* Common functions */
+static inline struct tagged_addr *kbase_get_cpu_phy_pages(
+ struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->cpu_alloc->pages;
+}
+
+static inline struct tagged_addr *kbase_get_gpu_phy_pages(
+ struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->gpu_alloc->pages;
+}
+
+static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ /* if no alloc object the backed size naturally is 0 */
+ if (!reg->cpu_alloc)
+ return 0;
+
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->cpu_alloc->nents;
+}
+
+#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
+
+static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
+{
+ struct kbase_mem_phy_alloc *alloc;
+ size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
+ size_t per_page_size = sizeof(*alloc->pages);
+
+ /* Imported pages may have page private data already in use */
+ if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+ alloc_size += nr_pages *
+ sizeof(*alloc->imported.user_buf.dma_addrs);
+ per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
+ }
+
+ /*
+ * Prevent nr_pages*per_page_size + sizeof(*alloc) from
+ * wrapping around.
+ */
+ if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+ / per_page_size))
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate based on the size to reduce internal fragmentation of vmem */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc = vzalloc(alloc_size);
+ else
+ alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+ if (!alloc)
+ return ERR_PTR(-ENOMEM);
+
+ /* Store allocation method */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
+ kref_init(&alloc->kref);
+ atomic_set(&alloc->gpu_mappings, 0);
+ alloc->nents = 0;
+ alloc->pages = (void *)(alloc + 1);
+ INIT_LIST_HEAD(&alloc->mappings);
+ alloc->type = type;
+ INIT_LIST_HEAD(&alloc->zone_cache);
+
+ if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
+ alloc->imported.user_buf.dma_addrs =
+ (void *) (alloc->pages + nr_pages);
+
+ return alloc;
+}
+
+static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
+ struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
+
+ reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ if (IS_ERR(reg->cpu_alloc))
+ return PTR_ERR(reg->cpu_alloc);
+ else if (!reg->cpu_alloc)
+ return -ENOMEM;
+ reg->cpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+ if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
+ && (reg->flags & KBASE_REG_CPU_CACHED)) {
+ reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ reg->gpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+ } else {
+ reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ }
+
+ INIT_LIST_HEAD(&reg->jit_node);
+ reg->flags &= ~KBASE_REG_FREE;
+ return 0;
+}
+
+static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_add_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_sub_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+/*
+ * Max size for kbdev memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * Max size for kctx memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * The order required for a 2MB page allocation (2^order * 4KB = 2MB)
+ */
+#define KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER 9
+
+/*
+ * The order required for a 4KB page allocation
+ */
+#define KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER 0
+
+/**
+ * kbase_mem_pool_init - Create a memory pool for a kbase device
+ * @pool: Memory pool to initialize
+ * @max_size: Maximum number of free pages the pool can hold
+ * @order: Page order for physical page size (order=0=>4kB, order=9=>2MB)
+ * @kbdev: Kbase device where memory is used
+ * @next_pool: Pointer to the next pool or NULL.
+ *
+ * Allocations from @pool are in whole pages. Each @pool has a free list from
+ * which pages can be quickly allocated. The free list is initially empty and
+ * filled whenever pages are freed back to the pool. The number of free pages
+ * in the pool will in general not exceed @max_size, but the pool may in
+ * certain corner cases grow above @max_size.
+ *
+ * If @next_pool is not NULL, we will allocate from @next_pool before going to
+ * the kernel allocator. Similarly, pages can spill over to @next_pool when
+ * @pool is full. Pages are zeroed before they spill over to another pool, to
+ * prevent leaking information between applications.
+ *
+ * A shrinker is registered so that Linux mm can reclaim pages from the pool as
+ * needed.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+ size_t max_size,
+ size_t order,
+ struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool);
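+/*
+ * Illustrative sketch only (not part of this driver): creating a device-wide
+ * 4kB pool backed directly by the kernel allocator, plus a context pool that
+ * spills into it. The mem_pool field names used here are assumptions.
+ *
+ *   kbase_mem_pool_init(&kbdev->mem_pool, KBASE_MEM_POOL_MAX_SIZE_KBDEV,
+ *                       KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER, kbdev, NULL);
+ *   kbase_mem_pool_init(&kctx->mem_pool, KBASE_MEM_POOL_MAX_SIZE_KCTX,
+ *                       KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER, kbdev,
+ *                       &kbdev->mem_pool);
+ */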
+
+/**
+ * kbase_mem_pool_term - Destroy a memory pool
+ * @pool: Memory pool to destroy
+ *
+ * Pages in the pool will spill over to @next_pool (if available) or freed to
+ * the kernel.
+ */
+void kbase_mem_pool_term(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc - Allocate a page from memory pool
+ * @pool: Memory pool to allocate from
+ *
+ * Allocations from the pool are made as follows:
+ * 1. If there are free pages in the pool, allocate a page from @pool.
+ * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
+ * from @next_pool.
+ * 3. Return NULL if there is no memory in the pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ */
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_free - Free a page to memory pool
+ * @pool: Memory pool where page should be freed
+ * @page: Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * Pages are freed to the pool as follows:
+ * 1. If @pool is not full, add @page to @pool.
+ * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
+ * @next_pool.
+ * 3. Finally, free @page to the kernel.
+ */
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
+ bool dirty);
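+/*
+ * Minimal sketch of the single-page path described above, assuming @pool has
+ * already been initialised with kbase_mem_pool_init():
+ *
+ *   struct page *p = kbase_mem_pool_alloc(pool);
+ *
+ *   if (!p)
+ *           return -ENOMEM;
+ *   ...use the page...
+ *   kbase_mem_pool_free(pool, p, false);  (page was not dirtied via the CPU)
+ */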
+
+/**
+ * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
+ * @pool: Memory pool to allocate from
+ * @nr_pages: Number of pages to allocate
+ * @pages: Pointer to array where the physical address of the allocated
+ * pages will be stored.
+ * @partial_allowed: If allocating fewer pages than requested is allowed
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
+ *
+ * Return:
+ * On success, the number of pages allocated (may be less than @nr_pages if
+ * @partial_allowed).
+ * On error, a negative error code.
+ */
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ struct tagged_addr *pages, bool partial_allowed);
+
+/**
+ * kbase_mem_pool_free_pages - Free pages to memory pool
+ * @pool: Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages: Pointer to array holding the physical addresses of the pages to
+ * free.
+ * @dirty: Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ * the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ struct tagged_addr *pages, bool dirty, bool reclaimed);
+
+/**
+ * kbase_mem_pool_size - Get number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
+ *
+ * Return: Number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
+{
+ return ACCESS_ONCE(pool->cur_size);
+}
+
+/**
+ * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
+{
+ return pool->max_size;
+}
+
+
+/**
+ * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ * @max_size: Maximum number of free pages the pool can hold
+ *
+ * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
+ * For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
+
+/**
+ * kbase_mem_pool_grow - Grow the pool
+ * @pool: Memory pool to grow
+ * @nr_to_grow: Number of pages to add to the pool
+ *
+ * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
+ * become larger than the maximum size specified.
+ *
+ * Return: 0 on success, -ENOMEM if unable to allocate sufficient pages
+ */
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+
+/**
+ * kbase_mem_pool_trim - Grow or shrink the pool to a new size
+ * @pool: Memory pool to trim
+ * @new_size: New number of pages in the pool
+ *
+ * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
+
+/**
+ * kbase_mem_alloc_page - Allocate a new page for a device
+ * @pool: Memory pool to allocate a page from
+ *
+ * Most uses should use kbase_mem_pool_alloc to allocate a page. However that
+ * function can fail in the event the pool is empty.
+ *
+ * Return: A new page or NULL if no memory
+ */
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
+
+int kbase_region_tracker_init(struct kbase_context *kctx);
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
+void kbase_region_tracker_term(struct kbase_context *kctx);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * @brief Check that a pointer is actually a valid region.
+ *
+ * Must be called with context lock held.
+ */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
+
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
+void kbase_free_alloced_region(struct kbase_va_region *reg);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+bool kbase_check_alloc_flags(unsigned long flags);
+bool kbase_check_import_flags(unsigned long flags);
+
+/**
+ * kbase_update_region_flags - Convert user space flags to kernel region flags
+ *
+ * @kctx: kbase context
+ * @reg: The region to update the flags on
+ * @flags: The flags passed from user space
+ *
+ * The user space flag BASE_MEM_COHERENT_SYSTEM_REQUIRED will be rejected and
+ * this function will fail if the system does not support system coherency.
+ *
+ * Return: 0 if successful, -EINVAL if the flags are not supported
+ */
+int kbase_update_region_flags(struct kbase_context *kctx,
+ struct kbase_va_region *reg, unsigned long flags);
+
+void kbase_gpu_vm_lock(struct kbase_context *kctx);
+void kbase_gpu_vm_unlock(struct kbase_context *kctx);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
+
+int kbase_mmu_init(struct kbase_context *kctx);
+void kbase_mmu_term(struct kbase_context *kctx);
+
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
+void kbase_mmu_free_pgd(struct kbase_context *kctx);
+int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags);
+int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr phys, size_t nr,
+ unsigned long flags);
+
+int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags);
+
+/**
+ * @brief Register region and map it on the GPU.
+ *
+ * Call kbase_add_va_region() and map the region on the GPU.
+ */
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+/**
+ * @brief Remove the region from the GPU and unregister it.
+ *
+ * Must be called with context lock held.
+ */
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_update(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
+ * @kctx: Kbase context
+ *
+ * Disable the MMU and perform the required cache maintenance to remove all
+ * data belonging to the provided kbase context from the GPU caches.
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_disable(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified
+ * address space.
+ * @kbdev: Kbase device
+ * @as_nr: The address space number to set to unmapped.
+ *
+ * This function must only be called during reset/power-up and is used to
+ * ensure the registers are in a known state.
+ *
+ * The caller must hold kbdev->mmu_hw_mutex.
+ */
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/** Dump the MMU tables to a buffer
+ *
+ * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
+ * buffer is too small then the return value will be NULL.
+ *
+ * The GPU vm lock must be held when calling this function.
+ *
+ * The buffer returned should be freed with @ref vfree when it is no longer required.
+ *
+ * @param[in] kctx The kbase context to dump
+ * @param[in] nr_pages The number of pages to allocate for the buffer.
+ *
+ * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
+ * small)
+ */
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
+
+/**
+ * kbase_sync_now - Perform cache maintenance on a memory region
+ *
+ * @kctx: The kbase context of the region
+ * @sset: A syncset structure describing the region and direction of the
+ * synchronisation required
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset);
+void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr cpu_pa,
+ struct tagged_addr gpu_pa, off_t offset, size_t size,
+ enum kbase_sync_type sync_fn);
+void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+
+/* OS specific functions */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
+void kbase_os_mem_map_lock(struct kbase_context *kctx);
+void kbase_os_mem_map_unlock(struct kbase_context *kctx);
+
+/**
+ * @brief Update the memory allocation counters for the current process
+ *
+ * OS specific call to update the current memory allocation counters for the
+ * current process with the supplied delta.
+ *
+ * @param[in] kctx The kbase context
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
+
+/**
+ * @brief Add to the memory allocation counters for the current process
+ *
+ * OS specific call to add to the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
+{
+ kbasep_os_process_page_usage_update(kctx, pages);
+}
+
+/**
+ * @brief Subtract from the memory allocation counters for the current process
+ *
+ * OS specific call to subtract from the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
+{
+ kbasep_os_process_page_usage_update(kctx, 0 - pages);
+}
+
+/**
+ * kbasep_find_enclosing_cpu_mapping_offset() - Find the offset of the CPU
+ * mapping of a memory allocation containing a given address range
+ *
+ * Searches for a CPU mapping of any part of any region that fully encloses the
+ * CPU virtual address range specified by @uaddr and @size. Returns a failure
+ * indication if only part of the address range lies within a CPU mapping.
+ *
+ * @kctx: The kernel base context used for the allocation.
+ * @uaddr: Start of the CPU virtual address range.
+ * @size: Size of the CPU virtual address range (in bytes).
+ * @offset: The offset from the start of the allocation to the specified CPU
+ * virtual address.
+ *
+ * Return: 0 if offset was obtained successfully. Error code otherwise.
+ */
+int kbasep_find_enclosing_cpu_mapping_offset(
+ struct kbase_context *kctx,
+ unsigned long uaddr, size_t size, u64 *offset);
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+* @brief Allocates physical pages.
+*
+* Allocates \a nr_pages_requested and updates the alloc object.
+*
+* @param[in] alloc allocation object to add pages to
+* @param[in] nr_pages_requested number of physical pages to allocate
+*
+* @return 0 if all pages have been successfully allocated. Error code otherwise
+*/
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);
+
+/**
+* @brief Free physical pages.
+*
+* Frees \a nr_pages and updates the alloc object.
+*
+* @param[in] alloc allocation object to free pages from
+* @param[in] nr_pages_to_free number of physical pages to free
+*/
+int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
+
+static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+{
+ SetPagePrivate(p);
+ if (sizeof(dma_addr_t) > sizeof(p->private)) {
+ /* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
+ * private field stays the same. So we have to be clever and
+ * use the fact that we only store DMA addresses of whole pages,
+ * so the low bits should be zero */
+ KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
+ set_page_private(p, dma_addr >> PAGE_SHIFT);
+ } else {
+ set_page_private(p, dma_addr);
+ }
+}
+
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+ if (sizeof(dma_addr_t) > sizeof(p->private))
+ return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
+
+ return (dma_addr_t)page_private(p);
+}
+
+static inline void kbase_clear_dma_addr(struct page *p)
+{
+ ClearPagePrivate(p);
+}
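+/*
+ * Illustrative round trip for the helpers above, assuming 32-bit ARM with
+ * LPAE (64-bit dma_addr_t, 32-bit page->private) and 4kB pages: a page-aligned
+ * DMA address such as 0x123456000 is stored as 0x123456000 >> 12 = 0x123456,
+ * which fits in 32 bits, and kbase_dma_addr() reconstructs it as
+ * 0x123456 << 12 = 0x123456000.
+ */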
+
+/**
+* @brief Process a bus or page fault.
+*
+* This function will process a fault on a specific address space
+*
+* @param[in] kbdev The @ref kbase_device the fault happened on
+* @param[in] kctx The @ref kbase_context for the faulting address space if
+* one was found.
+* @param[in] as The address space that has the fault
+*/
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_as *as);
+
+/**
+ * @brief Process a page fault.
+ *
+ * @param[in] data work_struct passed by queue_work()
+ */
+void page_fault_worker(struct work_struct *data);
+
+/**
+ * @brief Process a bus fault.
+ *
+ * @param[in] data work_struct passed by queue_work()
+ */
+void bus_fault_worker(struct work_struct *data);
+
+/**
+ * @brief Flush MMU workqueues.
+ *
+ * This function will cause any outstanding page or bus faults to be processed.
+ * It should be called prior to powering off the GPU.
+ *
+ * @param[in] kbdev Device pointer
+ */
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
+
+/**
+ * kbase_sync_single_for_device - update physical memory and give GPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir: DMA data direction
+ */
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+/**
+ * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir: DMA data direction
+ */
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * kbase_jit_debugfs_init - Add per context debugfs entry for JIT.
+ * @kctx: kbase context
+ */
+void kbase_jit_debugfs_init(struct kbase_context *kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_init - Initialize the JIT memory pool management
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_jit_init(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_allocate - Allocate JIT memory
+ * @kctx: kbase context
+ * @info: JIT allocation information
+ *
+ * Return: JIT allocation on success or NULL on failure.
+ */
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info);
+
+/**
+ * kbase_jit_free - Free a JIT allocation
+ * @kctx: kbase context
+ * @reg: JIT allocation
+ *
+ * Frees a JIT allocation and places it into the free pool for later reuse.
+ */
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
+ * @reg: JIT allocation
+ */
+void kbase_jit_backing_lost(struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_evict - Evict a JIT allocation from the pool
+ * @kctx: kbase context
+ *
+ * Evict the least recently used JIT allocation from the pool. This can be
+ * required if normal VA allocations are failing due to VA exhaustion.
+ *
+ * Return: True if a JIT allocation was freed, false otherwise.
+ */
+bool kbase_jit_evict(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_term - Terminate the JIT memory pool management
+ * @kctx: kbase context
+ */
+void kbase_jit_term(struct kbase_context *kctx);
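+/*
+ * Sketch of the JIT lifecycle exposed by the functions above (illustrative
+ * only; how @info is filled in is not shown here):
+ *
+ *   struct base_jit_alloc_info info = { ... };
+ *   struct kbase_va_region *reg;
+ *
+ *   kbase_jit_init(kctx);
+ *   reg = kbase_jit_allocate(kctx, &info);
+ *   if (reg)
+ *           kbase_jit_free(kctx, reg);  (returns the region to the JIT pool)
+ *   kbase_jit_term(kctx);
+ */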
+
+/**
+ * kbase_map_external_resource - Map an external resource to the GPU.
+ * @kctx: kbase context.
+ * @reg: The region to map.
+ * @locked_mm: The mm_struct which has been locked for this operation.
+ * @kds_res_count: The number of KDS resources.
+ * @kds_resources: Array of KDS resources.
+ * @kds_access_bitmap: Access bitmap for KDS.
+ * @exclusive: If the KDS resource requires exclusive access.
+ *
+ * Return: The physical allocation which backs the region on success or NULL
+ * on failure.
+ */
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+ struct kbase_context *kctx, struct kbase_va_region *reg,
+ struct mm_struct *locked_mm
+#ifdef CONFIG_KDS
+ , u32 *kds_res_count, struct kds_resource **kds_resources,
+ unsigned long *kds_access_bitmap, bool exclusive
+#endif
+ );
+
+/**
+ * kbase_unmap_external_resource - Unmap an external resource from the GPU.
+ * @kctx: kbase context.
+ * @reg: The region to unmap or NULL if it has already been released.
+ * @alloc: The physical allocation being unmapped.
+ */
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_sticky_resource_init - Initialize sticky resource management.
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_sticky_resource_init(struct kbase_context *kctx);
+
+/**
+ * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
+ * @kctx: kbase context.
+ * @gpu_addr: The GPU address of the external resource.
+ *
+ * Return: The metadata object which represents the binding between the
+ * external resource and the kbase context on success or NULL on failure.
+ */
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+ struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_release - Release a reference on a sticky resource.
+ * @kctx: kbase context.
+ * @meta: Binding metadata.
+ * @gpu_addr: GPU address of the external resource.
+ *
+ * If meta is NULL then gpu_addr will be used to scan the metadata list and
+ * find the matching metadata (if any), otherwise the provided meta will be
+ * used and gpu_addr will be ignored.
+ *
+ * Return: True if the release found the metadata and the reference was dropped.
+ */
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+ struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_term - Terminate sticky resource management.
+ * @kctx: kbase context
+ */
+void kbase_sticky_resource_term(struct kbase_context *kctx);
+
+/**
+ * kbase_zone_cache_update - Update the memory zone cache after new pages have
+ * been added.
+ * @alloc: The physical memory allocation to build the cache for.
+ * @start_offset: Offset to where the new pages start.
+ *
+ * Updates an existing memory zone cache, updating the counters for the
+ * various zones.
+ * If the memory allocation doesn't already have a zone cache, assume that
+ * one was intentionally not created and do nothing.
+ *
+ * Return: Zero if the cache was updated, negative error code on error.
+ */
+int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset);
+
+/**
+ * kbase_zone_cache_build - Build the memory zone cache.
+ * @alloc: The physical memory allocation to build the cache for.
+ *
+ * Create a new zone cache for the provided physical memory allocation if
+ * one doesn't already exist, if one does exist then just return.
+ *
+ * Return: Zero if the zone cache was created, negative error code on error.
+ */
+int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_zone_cache_clear - Clear the memory zone cache.
+ * @alloc: The physical memory allocation to clear the cache on.
+ */
+void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc);
+
+#endif /* _KBASE_MEM_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_linux.c b/drivers/gpu/arm_gpu/mali_kbase_mem_linux.c
new file mode 100644
index 000000000000..b1f2c461b1e1
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_linux.c
@@ -0,0 +1,2670 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_linux.c
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#include <linux/dma-attrs.h>
+#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#include <linux/shrinker.h>
+#include <linux/cache.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_tlstream.h>
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
+
+/**
+ * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Shrink (or completely remove) all CPU mappings which reference the shrunk
+ * part of the allocation.
+ *
+ * Note: Caller must be holding the process's mmap_sem lock.
+ */
+static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, or a negative error code on failure
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
+ * itself is unmodified as we still need to reserve the VA; only the page tables
+ * will be modified by this function.
+ */
+static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+ u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+ u64 *gpu_va)
+{
+ int zone;
+ int gpu_pc_bits;
+ struct kbase_va_region *reg;
+ struct device *dev;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(flags);
+ KBASE_DEBUG_ASSERT(gpu_va);
+
+ dev = kctx->kbdev->dev;
+ *gpu_va = 0; /* return 0 on failure */
+
+ gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+
+ if (0 == va_pages) {
+ dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
+ goto bad_size;
+ }
+
+ if (va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ if (!kbase_check_alloc_flags(*flags)) {
+ dev_warn(dev,
+ "kbase_mem_alloc called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable");
+ goto bad_flags;
+ }
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+ *flags &= ~BASE_MEM_COHERENT_SYSTEM;
+ }
+
+ /* Limit GPU executable allocs to GPU PC size */
+ if ((*flags & BASE_MEM_PROT_GPU_EX) &&
+ (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
+ goto bad_ex_size;
+
+ /* find out which VA zone to use */
+ if (*flags & BASE_MEM_SAME_VA)
+ zone = KBASE_REG_ZONE_SAME_VA;
+ else if (*flags & BASE_MEM_PROT_GPU_EX)
+ zone = KBASE_REG_ZONE_EXEC;
+ else
+ zone = KBASE_REG_ZONE_CUSTOM_VA;
+
+ reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
+ if (!reg) {
+ dev_err(dev, "Failed to allocate free region");
+ goto no_region;
+ }
+
+ if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+ goto invalid_flags;
+
+ if (kbase_reg_prepare_native(reg, kctx) != 0) {
+ dev_err(dev, "Failed to prepare region");
+ goto prepare_failed;
+ }
+
+ if (*flags & BASE_MEM_GROW_ON_GPF)
+ reg->extent = extent;
+ else
+ reg->extent = 0;
+
+ if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {
+ dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
+ (unsigned long long)commit_pages,
+ (unsigned long long)va_pages);
+ goto no_mem;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* mmap needed to setup VA? */
+ if (*flags & BASE_MEM_SAME_VA) {
+ unsigned long prot = PROT_NONE;
+ unsigned long va_size = va_pages << PAGE_SHIFT;
+ unsigned long va_map = va_size;
+ unsigned long cookie, cookie_nr;
+ unsigned long cpu_addr;
+
+ /* Bind to a cookie */
+ if (!kctx->cookies) {
+ dev_err(dev, "No cookies available for allocation!");
+ kbase_gpu_vm_unlock(kctx);
+ goto no_cookie;
+ }
+ /* return a cookie */
+ cookie_nr = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << cookie_nr);
+ BUG_ON(kctx->pending_regions[cookie_nr]);
+ kctx->pending_regions[cookie_nr] = reg;
+
+ kbase_gpu_vm_unlock(kctx);
+
+ /* relocate to correct base */
+ cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ cookie <<= PAGE_SHIFT;
+
+ /*
+ * 10.1-10.4 UKU userland relies on the kernel to call mmap.
+ * For all other versions we can just return the cookie
+ */
+ if (kctx->api_version < KBASE_API_VERSION(10, 1) ||
+ kctx->api_version > KBASE_API_VERSION(10, 4)) {
+ *gpu_va = (u64) cookie;
+ return reg;
+ }
+ if (*flags & BASE_MEM_PROT_CPU_RD)
+ prot |= PROT_READ;
+ if (*flags & BASE_MEM_PROT_CPU_WR)
+ prot |= PROT_WRITE;
+
+ cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot,
+ MAP_SHARED, cookie);
+
+ if (IS_ERR_VALUE(cpu_addr)) {
+ kbase_gpu_vm_lock(kctx);
+ kctx->pending_regions[cookie_nr] = NULL;
+ kctx->cookies |= (1UL << cookie_nr);
+ kbase_gpu_vm_unlock(kctx);
+ goto no_mmap;
+ }
+
+ *gpu_va = (u64) cpu_addr;
+ } else /* we control the VA */ {
+ if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
+ dev_warn(dev, "Failed to map memory on GPU");
+ kbase_gpu_vm_unlock(kctx);
+ goto no_mmap;
+ }
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+
+ kbase_gpu_vm_unlock(kctx);
+ }
+
+ return reg;
+
+no_mmap:
+no_cookie:
+no_mem:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+invalid_flags:
+prepare_failed:
+ kfree(reg);
+no_region:
+bad_ex_size:
+bad_flags:
+bad_size:
+ return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mem_alloc);
+
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out)
+{
+ struct kbase_va_region *reg;
+ int ret = -EINVAL;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(out);
+
+ if (gpu_addr & ~PAGE_MASK) {
+ dev_warn(kctx->kbdev->dev, "mem_query: gpu_addr: passed parameter is invalid");
+ return -EINVAL;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ switch (query) {
+ case KBASE_MEM_QUERY_COMMIT_SIZE:
+ if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) {
+ *out = kbase_reg_current_backed_size(reg);
+ } else {
+ size_t i;
+ struct kbase_aliased *aliased;
+ *out = 0;
+ aliased = reg->cpu_alloc->imported.alias.aliased;
+ for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++)
+ *out += aliased[i].length;
+ }
+ break;
+ case KBASE_MEM_QUERY_VA_SIZE:
+ *out = reg->nr_pages;
+ break;
+ case KBASE_MEM_QUERY_FLAGS:
+ {
+ *out = 0;
+ if (KBASE_REG_CPU_WR & reg->flags)
+ *out |= BASE_MEM_PROT_CPU_WR;
+ if (KBASE_REG_CPU_RD & reg->flags)
+ *out |= BASE_MEM_PROT_CPU_RD;
+ if (KBASE_REG_CPU_CACHED & reg->flags)
+ *out |= BASE_MEM_CACHED_CPU;
+ if (KBASE_REG_GPU_WR & reg->flags)
+ *out |= BASE_MEM_PROT_GPU_WR;
+ if (KBASE_REG_GPU_RD & reg->flags)
+ *out |= BASE_MEM_PROT_GPU_RD;
+ if (!(KBASE_REG_GPU_NX & reg->flags))
+ *out |= BASE_MEM_PROT_GPU_EX;
+ if (KBASE_REG_SHARE_BOTH & reg->flags)
+ *out |= BASE_MEM_COHERENT_SYSTEM;
+ if (KBASE_REG_SHARE_IN & reg->flags)
+ *out |= BASE_MEM_COHERENT_LOCAL;
+ break;
+ }
+ default:
+ *out = 0;
+ goto out_unlock;
+ }
+
+ ret = 0;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return ret;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_count_objects - Count number of pages in the
+ * Ephemeral memory eviction list.
+ * @s: Shrinker
+ * @sc: Shrinker control
+ *
+ * Return: Number of pages which can be freed.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_context *kctx;
+ struct kbase_mem_phy_alloc *alloc;
+ unsigned long pages = 0;
+
+ kctx = container_of(s, struct kbase_context, reclaim);
+
+ mutex_lock(&kctx->jit_evict_lock);
+
+ list_for_each_entry(alloc, &kctx->evict_list, evict_node)
+ pages += alloc->nents;
+
+ mutex_unlock(&kctx->jit_evict_lock);
+ return pages;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_scan_objects - Scan the Ephemeral memory eviction
+ * list for pages and try to reclaim them.
+ * @s: Shrinker
+ * @sc: Shrinker control
+ *
+ * Return: Number of pages freed (can be less than requested) or -1 if the
+ * shrinker failed to free pages in its pool.
+ *
+ * Note:
+ * This function accesses region structures without taking the region lock;
+ * this is required because the OOM killer can call the shrinker after the
+ * region lock has already been taken.
+ * This is safe as we can guarantee that a region on the eviction list will
+ * not be freed (kbase_mem_free_region removes the allocation from the list
+ * before destroying it), or modified by other parts of the driver.
+ * The eviction list itself is guarded by the eviction lock and the MMU updates
+ * are protected by their own lock.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_context *kctx;
+ struct kbase_mem_phy_alloc *alloc;
+ struct kbase_mem_phy_alloc *tmp;
+ unsigned long freed = 0;
+
+ kctx = container_of(s, struct kbase_context, reclaim);
+ mutex_lock(&kctx->jit_evict_lock);
+
+ list_for_each_entry_safe(alloc, tmp, &kctx->evict_list, evict_node) {
+ int err;
+
+ err = kbase_mem_shrink_gpu_mapping(kctx, alloc->reg,
+ 0, alloc->nents);
+ if (err != 0) {
+ /*
+ * Failed to remove GPU mapping, tell the shrinker
+ * to stop trying to shrink our slab even though we
+ * have pages in it.
+ */
+ freed = -1;
+ goto out_unlock;
+ }
+
+ /*
+ * Update alloc->evicted before freeing the backing so the
+ * helper can determine that it needs to bypass the accounting
+ * and memory pool.
+ */
+ alloc->evicted = alloc->nents;
+
+ kbase_free_phy_pages_helper(alloc, alloc->evicted);
+ freed += alloc->evicted;
+ list_del_init(&alloc->evict_node);
+
+ /*
+ * Inform the JIT allocator this region has lost backing
+ * as it might need to free the allocation.
+ */
+ kbase_jit_backing_lost(alloc->reg);
+
+ /* Enough pages have been freed so stop now */
+ if (freed > sc->nr_to_scan)
+ break;
+ }
+out_unlock:
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_evictable_reclaim_shrink(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ if (sc->nr_to_scan == 0)
+ return kbase_mem_evictable_reclaim_count_objects(s, sc);
+
+ return kbase_mem_evictable_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_evictable_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->evict_list);
+ mutex_init(&kctx->jit_evict_lock);
+
+ /* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ kctx->reclaim.shrink = kbase_mem_evictable_reclaim_shrink;
+#else
+ kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects;
+ kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects;
+#endif
+ kctx->reclaim.seeks = DEFAULT_SEEKS;
+ /* Kernel versions prior to 3.1 :
+ * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+ kctx->reclaim.batch = 0;
+#endif
+ register_shrinker(&kctx->reclaim);
+ return 0;
+}
+
+void kbase_mem_evictable_deinit(struct kbase_context *kctx)
+{
+ unregister_shrinker(&kctx->reclaim);
+}
+
+struct kbase_mem_zone_cache_entry {
+ /* List head used to link the cache entry to the memory allocation. */
+ struct list_head zone_node;
+ /* The zone this cache entry is for. */
+ struct zone *zone;
+ /* The number of pages in the allocation which belong to this zone. */
+ u64 count;
+};
+
+static int kbase_zone_cache_builder(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset)
+{
+ struct kbase_mem_zone_cache_entry *cache = NULL;
+ size_t i;
+ int ret = 0;
+
+ for (i = start_offset; i < alloc->nents; i++) {
+ struct page *p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
+ struct zone *zone = page_zone(p);
+ bool create = true;
+
+ if (cache && (cache->zone == zone)) {
+ /*
+ * Fast path check as most of the time adjacent
+ * pages come from the same zone.
+ */
+ create = false;
+ } else {
+ /*
+ * Slow path check, walk all the cache entries to see
+ * if we already know about this zone.
+ */
+ list_for_each_entry(cache, &alloc->zone_cache, zone_node) {
+ if (cache->zone == zone) {
+ create = false;
+ break;
+ }
+ }
+ }
+
+ /* This zone wasn't found in the cache, create an entry for it */
+ if (create) {
+ cache = kmalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ cache->zone = zone;
+ cache->count = 0;
+ list_add(&cache->zone_node, &alloc->zone_cache);
+ }
+
+ cache->count++;
+ }
+ return 0;
+
+bail:
+ return ret;
+}
+
+int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset)
+{
+ /*
+ * Bail if the zone cache is empty; only update the cache if it
+ * existed in the first place.
+ */
+ if (list_empty(&alloc->zone_cache))
+ return 0;
+
+ return kbase_zone_cache_builder(alloc, start_offset);
+}
+
+int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc)
+{
+ /* Bail if the zone cache already exists */
+ if (!list_empty(&alloc->zone_cache))
+ return 0;
+
+ return kbase_zone_cache_builder(alloc, 0);
+}
+
+void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_mem_zone_cache_entry *walker;
+
+ while (!list_empty(&alloc->zone_cache)) {
+ walker = list_first_entry(&alloc->zone_cache,
+ struct kbase_mem_zone_cache_entry, zone_node);
+ list_del(&walker->zone_node);
+ kfree(walker);
+ }
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_mem_zone_cache_entry *zone_cache;
+ int __maybe_unused new_page_count;
+ int err;
+
+ /* Attempt to build a zone cache of tracking */
+ err = kbase_zone_cache_build(alloc);
+ if (err == 0) {
+ /* Bulk update all the zones */
+ list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
+ zone_page_state_add(zone_cache->count,
+ zone_cache->zone, NR_SLAB_RECLAIMABLE);
+ }
+ } else {
+ /* Fall-back to page by page updates */
+ int i;
+
+ for (i = 0; i < alloc->nents; i++) {
+ struct page *p;
+ struct zone *zone;
+
+ p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
+ zone = page_zone(p);
+
+ zone_page_state_add(1, zone, NR_SLAB_RECLAIMABLE);
+ }
+ }
+
+ kbase_process_page_usage_dec(kctx, alloc->nents);
+ new_page_count = kbase_atomic_sub_pages(alloc->nents,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+/**
+ * kbase_mem_evictable_unmark_reclaim - Mark the pages as no longer reclaimable.
+ * @alloc: The physical allocation
+ */
+static
+void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_mem_zone_cache_entry *zone_cache;
+ int __maybe_unused new_page_count;
+ int err;
+
+ new_page_count = kbase_atomic_add_pages(alloc->nents,
+ &kctx->used_pages);
+ kbase_atomic_add_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+ /* Increase mm counters so that the allocation is accounted for
+ * against the process and thus is visible to the OOM killer,
+ * then remove it from the reclaimable accounting. */
+ kbase_process_page_usage_inc(kctx, alloc->nents);
+
+ /* Attempt to build a zone cache of tracking */
+ err = kbase_zone_cache_build(alloc);
+ if (err == 0) {
+ /* Bulk update all the zones */
+ list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
+ zone_page_state_add(-zone_cache->count,
+ zone_cache->zone, NR_SLAB_RECLAIMABLE);
+ }
+ } else {
+ /* Fall-back to page by page updates */
+ int i;
+
+ for (i = 0; i < alloc->nents; i++) {
+ struct page *p;
+ struct zone *zone;
+
+ p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
+ zone = page_zone(p);
+ zone_page_state_add(-1, zone, NR_SLAB_RECLAIMABLE);
+ }
+ }
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+ struct kbase_context *kctx = gpu_alloc->imported.kctx;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* This allocation can't already be on a list. */
+ WARN_ON(!list_empty(&gpu_alloc->evict_node));
+
+ kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg,
+ 0, gpu_alloc->nents);
+
+ /*
+ * Add the allocation to the eviction list; after this point the shrinker
+ * can reclaim it.
+ */
+ mutex_lock(&kctx->jit_evict_lock);
+ list_add(&gpu_alloc->evict_node, &kctx->evict_list);
+ mutex_unlock(&kctx->jit_evict_lock);
+ kbase_mem_evictable_mark_reclaim(gpu_alloc);
+
+ gpu_alloc->reg->flags |= KBASE_REG_DONT_NEED;
+ return 0;
+}
+
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+ struct kbase_context *kctx = gpu_alloc->imported.kctx;
+ int err = 0;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * First remove the allocation from the eviction list as it's no
+ * longer eligible for eviction.
+ */
+ list_del_init(&gpu_alloc->evict_node);
+
+ if (gpu_alloc->evicted == 0) {
+ /*
+ * The backing is still present, update the VM stats as it's
+ * in use again.
+ */
+ kbase_mem_evictable_unmark_reclaim(gpu_alloc);
+ } else {
+ /* If the region is still alive ... */
+ if (gpu_alloc->reg) {
+ /* ... allocate replacement backing ... */
+ err = kbase_alloc_phy_pages_helper(gpu_alloc,
+ gpu_alloc->evicted);
+
+ /*
+ * ... and grow the mapping back to its
+ * pre-eviction size.
+ */
+ if (!err)
+ err = kbase_mem_grow_gpu_mapping(kctx,
+ gpu_alloc->reg,
+ gpu_alloc->evicted, 0);
+
+ gpu_alloc->evicted = 0;
+ }
+ }
+
+ /* If the region is still alive remove the DONT_NEED attribute. */
+ if (gpu_alloc->reg)
+ gpu_alloc->reg->flags &= ~KBASE_REG_DONT_NEED;
+
+ return (err == 0);
+}
+
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask)
+{
+ struct kbase_va_region *reg;
+ int ret = -EINVAL;
+ unsigned int real_flags = 0;
+ unsigned int prev_flags = 0;
+ bool prev_needed, new_needed;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ if (!gpu_addr)
+ return -EINVAL;
+
+ if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE))
+ return -EINVAL;
+
+ /* nuke other bits */
+ flags &= mask;
+
+ /* check for only supported flags */
+ if (flags & ~(BASE_MEM_FLAGS_MODIFIABLE))
+ goto out;
+
+ /* mask covers bits we don't support? */
+ if (mask & ~(BASE_MEM_FLAGS_MODIFIABLE))
+ goto out;
+
+ /* convert flags */
+ if (BASE_MEM_COHERENT_SYSTEM & flags)
+ real_flags |= KBASE_REG_SHARE_BOTH;
+ else if (BASE_MEM_COHERENT_LOCAL & flags)
+ real_flags |= KBASE_REG_SHARE_IN;
+
+ /* now we can lock down the context, and find the region */
+ down_write(&current->mm->mmap_sem);
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ /* Is the region transitioning between not needed and needed? */
+ prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
+ new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
+ if (prev_needed != new_needed) {
+ /* Aliased allocations can't be made ephemeral */
+ if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
+ goto out_unlock;
+
+ if (new_needed) {
+ /* Only native allocations can be marked not needed */
+ if (reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ret = kbase_mem_evictable_make(reg->gpu_alloc);
+ if (ret)
+ goto out_unlock;
+ } else {
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+ }
+
+ /* limit to imported memory */
+ if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
+ (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
+ goto out_unlock;
+
+ /* no change? */
+ if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ /* save for roll back */
+ prev_flags = reg->flags;
+ reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+ reg->flags |= real_flags;
+
+ /* Currently supporting only imported memory */
+ switch (reg->gpu_alloc->type) {
+#ifdef CONFIG_UMP
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents, reg->flags);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
+ /* Future mappings will use the new flags; the existing mapping will NOT
+ * be updated, as the memory should not be in use by the GPU when the
+ * flags are updated.
+ */
+ ret = 0;
+ WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ /* roll back on error, i.e. not UMP */
+ if (ret)
+ reg->flags = prev_flags;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ up_write(&current->mm->mmap_sem);
+out:
+ return ret;
+}
+
+#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
+
+#ifdef CONFIG_UMP
+static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
+{
+ struct kbase_va_region *reg;
+ ump_dd_handle umph;
+ u64 block_count;
+ const ump_dd_physical_block_64 *block_array;
+ u64 i, j;
+ int page = 0;
+ ump_alloc_flags ump_flags;
+ ump_alloc_flags cpu_flags;
+ ump_alloc_flags gpu_flags;
+
+ if (*flags & BASE_MEM_SECURE)
+ goto bad_flags;
+
+ umph = ump_dd_from_secure_id(id);
+ if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
+ goto bad_id;
+
+ ump_flags = ump_dd_allocation_flags_get(umph);
+ cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
+ gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
+ UMP_DEVICE_MASK;
+
+ *va_pages = ump_dd_size_get_64(umph);
+ *va_pages >>= PAGE_SHIFT;
+
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ if (*flags & BASE_MEM_SAME_VA)
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+ else
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+
+ if (!reg)
+ goto no_region;
+
+ /* we've got pages to map now, and support SAME_VA */
+ *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ reg->gpu_alloc->imported.ump_handle = umph;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */
+
+ /* Override import flags based on UMP flags */
+ *flags &= ~(BASE_MEM_CACHED_CPU);
+ *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR);
+ *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR);
+
+ if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
+ (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
+ reg->flags |= KBASE_REG_CPU_CACHED;
+ *flags |= BASE_MEM_CACHED_CPU;
+ }
+
+ if (cpu_flags & UMP_PROT_CPU_WR) {
+ reg->flags |= KBASE_REG_CPU_WR;
+ *flags |= BASE_MEM_PROT_CPU_WR;
+ }
+
+ if (cpu_flags & UMP_PROT_CPU_RD) {
+ reg->flags |= KBASE_REG_CPU_RD;
+ *flags |= BASE_MEM_PROT_CPU_RD;
+ }
+
+ if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
+ (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
+ reg->flags |= KBASE_REG_GPU_CACHED;
+
+ if (gpu_flags & UMP_PROT_DEVICE_WR) {
+ reg->flags |= KBASE_REG_GPU_WR;
+ *flags |= BASE_MEM_PROT_GPU_WR;
+ }
+
+ if (gpu_flags & UMP_PROT_DEVICE_RD) {
+ reg->flags |= KBASE_REG_GPU_RD;
+ *flags |= BASE_MEM_PROT_GPU_RD;
+ }
+
+ /* ump phys block query */
+ ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
+
+ for (i = 0; i < block_count; i++) {
+ for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
+ struct tagged_addr tagged;
+
+ tagged = as_tagged(block_array[i].addr +
+ (j << PAGE_SHIFT));
+ reg->gpu_alloc->pages[page] = tagged;
+ page++;
+ }
+ }
+ reg->gpu_alloc->nents = *va_pages;
+ reg->extent = 0;
+
+ return reg;
+
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ ump_dd_release(umph);
+bad_id:
+bad_flags:
+ return NULL;
+}
+#endif /* CONFIG_UMP */
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
+ int fd, u64 *va_pages, u64 *flags, u32 padding)
+{
+ struct kbase_va_region *reg;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ bool shared_zone = false;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dma_buf))
+ goto no_buf;
+
+ dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
+ if (!dma_attachment)
+ goto no_attachment;
+
+ *va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* ignore SAME_VA */
+ *flags &= ~BASE_MEM_SAME_VA;
+
+ if (*flags & BASE_MEM_IMPORT_SHARED)
+ shared_zone = true;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /*
+ * 64-bit tasks require us to reserve VA on the CPU that we use
+ * on the GPU.
+ */
+ shared_zone = true;
+ }
+#endif
+
+ if (shared_zone) {
+ *flags |= BASE_MEM_NEED_MMAP;
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+ } else {
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+ }
+
+ if (!reg)
+ goto no_region;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ /* No pages to map yet */
+ reg->gpu_alloc->nents = 0;
+
+ if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+ goto invalid_flags;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */
+ reg->flags |= KBASE_REG_GPU_CACHED;
+
+ if (*flags & BASE_MEM_SECURE)
+ reg->flags |= KBASE_REG_SECURE;
+
+ if (padding)
+ reg->flags |= KBASE_REG_IMPORT_PAD;
+
+ reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM;
+ reg->gpu_alloc->imported.umm.sgt = NULL;
+ reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
+ reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
+ reg->extent = 0;
+
+ return reg;
+
+invalid_flags:
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ dma_buf_detach(dma_buf, dma_attachment);
+no_attachment:
+ dma_buf_put(dma_buf);
+no_buf:
+ return NULL;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+static u32 kbase_get_cache_line_alignment(struct kbase_context *kctx)
+{
+ u32 cpu_cache_line_size = cache_line_size();
+ u32 gpu_cache_line_size =
+ (1UL << kctx->kbdev->gpu_props.props.l2_props.log2_line_size);
+
+ return ((cpu_cache_line_size > gpu_cache_line_size) ?
+ cpu_cache_line_size :
+ gpu_cache_line_size);
+}
+
+static struct kbase_va_region *kbase_mem_from_user_buffer(
+ struct kbase_context *kctx, unsigned long address,
+ unsigned long size, u64 *va_pages, u64 *flags)
+{
+ long i;
+ struct kbase_va_region *reg;
+ long faulted_pages;
+ int zone = KBASE_REG_ZONE_CUSTOM_VA;
+ bool shared_zone = false;
+ u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx);
+ struct kbase_alloc_import_user_buf *user_buf;
+ struct page **pages = NULL;
+
+ if ((address & (cache_line_alignment - 1)) != 0 ||
+ (size & (cache_line_alignment - 1)) != 0) {
+ /* Coherency must be enabled to handle partial cache lines */
+ if (*flags & (BASE_MEM_COHERENT_SYSTEM |
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+ /* Force coherent system required flag, import will
+ * then fail if coherency isn't available
+ */
+ *flags |= BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+ } else {
+ dev_warn(kctx->kbdev->dev,
+ "User buffer is not cache line aligned and no coherency enabled\n");
+ goto bad_size;
+ }
+ }
+
+ *va_pages = (PAGE_ALIGN(address + size) >> PAGE_SHIFT) -
+ PFN_DOWN(address);
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (UINT64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* SAME_VA generally not supported with imported memory (no known use cases) */
+ *flags &= ~BASE_MEM_SAME_VA;
+
+ if (*flags & BASE_MEM_IMPORT_SHARED)
+ shared_zone = true;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /*
+ * 64-bit tasks require us to reserve VA on the CPU that we use
+ * on the GPU.
+ */
+ shared_zone = true;
+ }
+#endif
+
+ if (shared_zone) {
+ *flags |= BASE_MEM_NEED_MMAP;
+ zone = KBASE_REG_ZONE_SAME_VA;
+ }
+
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone);
+
+ if (!reg)
+ goto no_region;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages,
+ KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+ goto invalid_flags;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* User-buffers are always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* Cannot be grown */
+
+ user_buf = &reg->gpu_alloc->imported.user_buf;
+
+ user_buf->size = size;
+ user_buf->address = address;
+ user_buf->nr_pages = *va_pages;
+ user_buf->mm = current->mm;
+ user_buf->pages = kmalloc_array(*va_pages, sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!user_buf->pages)
+ goto no_page_array;
+
+ /* If the region is coherent with the CPU then the memory is imported
+ * and mapped onto the GPU immediately.
+ * Otherwise get_user_pages is called as a sanity check, with NULL as
+ * the pages argument, which faults the pages in but does not pin
+ * them. The memory will then be pinned only around the jobs that
+ * specify the region as an external resource.
+ */
+ if (reg->flags & KBASE_REG_SHARE_BOTH) {
+ pages = user_buf->pages;
+ *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+ }
+
+ down_read(&current->mm->mmap_sem);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
+ reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+ faulted_pages = get_user_pages(address, *va_pages,
+ reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#else
+ faulted_pages = get_user_pages(address, *va_pages,
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
+#endif
+
+ up_read(&current->mm->mmap_sem);
+
+ if (faulted_pages != *va_pages)
+ goto fault_mismatch;
+
+ atomic_inc(&current->mm->mm_count);
+
+ reg->gpu_alloc->nents = 0;
+ reg->extent = 0;
+
+ if (pages) {
+ struct device *dev = kctx->kbdev->dev;
+ unsigned long local_size = user_buf->size;
+ unsigned long offset = user_buf->address & ~PAGE_MASK;
+ struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg);
+
+ /* Top bit signifies that this was pinned on import */
+ user_buf->current_mapping_usage_count |= PINNED_ON_IMPORT;
+
+ for (i = 0; i < faulted_pages; i++) {
+ dma_addr_t dma_addr;
+ unsigned long min;
+
+ min = MIN(PAGE_SIZE - offset, local_size);
+ dma_addr = dma_map_page(dev, pages[i],
+ offset, min,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr))
+ goto unwind_dma_map;
+
+ user_buf->dma_addrs[i] = dma_addr;
+ pa[i] = as_tagged(page_to_phys(pages[i]));
+
+ local_size -= min;
+ offset = 0;
+ }
+
+ reg->gpu_alloc->nents = faulted_pages;
+ }
+
+ return reg;
+
+unwind_dma_map:
+ while (i--) {
+ dma_unmap_page(kctx->kbdev->dev,
+ user_buf->dma_addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+fault_mismatch:
+ if (pages) {
+ for (i = 0; i < faulted_pages; i++)
+ put_page(pages[i]);
+ }
+ kfree(user_buf->pages);
+no_page_array:
+invalid_flags:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ return NULL;
+
+}
+
+
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+ u64 nents, struct base_mem_aliasing_info *ai,
+ u64 *num_pages)
+{
+ struct kbase_va_region *reg;
+ u64 gpu_va;
+ size_t i;
+ bool coherent;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(flags);
+ KBASE_DEBUG_ASSERT(ai);
+ KBASE_DEBUG_ASSERT(num_pages);
+
+ /* mask to only allowed flags */
+ *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL |
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED);
+
+ if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
+ dev_warn(kctx->kbdev->dev,
+ "kbase_mem_alias called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+ coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 ||
+ (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0;
+
+ if (!stride)
+ goto bad_stride;
+
+ if (!nents)
+ goto bad_nents;
+
+ if ((nents * stride) > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* calculate the number of pages this alias will cover */
+ *num_pages = nents * stride;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* 64-bit tasks must MMAP anyway, but not expose this address to
+ * clients */
+ *flags |= BASE_MEM_NEED_MMAP;
+ reg = kbase_alloc_free_region(kctx, 0, *num_pages,
+ KBASE_REG_ZONE_SAME_VA);
+ } else {
+#else
+ if (1) {
+#endif
+ reg = kbase_alloc_free_region(kctx, 0, *num_pages,
+ KBASE_REG_ZONE_CUSTOM_VA);
+ }
+
+ if (!reg)
+ goto no_reg;
+
+ /* zero-sized page array, as we don't need one / can't support one */
+ reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+ goto invalid_flags;
+
+ reg->gpu_alloc->imported.alias.nents = nents;
+ reg->gpu_alloc->imported.alias.stride = stride;
+ reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents);
+ if (!reg->gpu_alloc->imported.alias.aliased)
+ goto no_aliased_array;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* validate and add src handles */
+ for (i = 0; i < nents; i++) {
+ if (ai[i].handle.basep.handle < BASE_MEM_FIRST_FREE_ADDRESS) {
+ if (ai[i].handle.basep.handle !=
+ BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE)
+ goto bad_handle; /* unsupported magic handle */
+ if (!ai[i].length)
+ goto bad_handle; /* must be > 0 */
+ if (ai[i].length > stride)
+ goto bad_handle; /* can't be larger than the
+ stride */
+ reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+ } else {
+ struct kbase_va_region *aliasing_reg;
+ struct kbase_mem_phy_alloc *alloc;
+
+ aliasing_reg = kbase_region_tracker_find_region_base_address(
+ kctx,
+ (ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
+
+ /* validate found region */
+ if (!aliasing_reg)
+ goto bad_handle; /* Not found */
+ if (aliasing_reg->flags & KBASE_REG_FREE)
+ goto bad_handle; /* Free region */
+ if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
+ goto bad_handle; /* Ephemeral region */
+ if (!aliasing_reg->gpu_alloc)
+ goto bad_handle; /* No alloc */
+ if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+ goto bad_handle; /* Not a native alloc */
+ if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0))
+ goto bad_handle;
+ /* Non-coherent memory cannot alias
+ coherent memory, and vice versa.*/
+
+ /* check size against stride */
+ if (!ai[i].length)
+ goto bad_handle; /* must be > 0 */
+ if (ai[i].length > stride)
+ goto bad_handle; /* can't be larger than the
+ stride */
+
+ alloc = aliasing_reg->gpu_alloc;
+
+ /* check against the alloc's size */
+ if (ai[i].offset > alloc->nents)
+ goto bad_handle; /* beyond end */
+ if (ai[i].offset + ai[i].length > alloc->nents)
+ goto bad_handle; /* beyond end */
+
+ reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
+ reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+ reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+ }
+ }
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* Bind to a cookie */
+ if (!kctx->cookies) {
+ dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
+ goto no_cookie;
+ }
+ /* return a cookie */
+ gpu_va = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << gpu_va);
+ BUG_ON(kctx->pending_regions[gpu_va]);
+ kctx->pending_regions[gpu_va] = reg;
+
+ /* relocate to correct base */
+ gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ gpu_va <<= PAGE_SHIFT;
+ } else /* we control the VA */ {
+#else
+ if (1) {
+#endif
+ if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) {
+ dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU");
+ goto no_mmap;
+ }
+ /* return real GPU VA */
+ gpu_va = reg->start_pfn << PAGE_SHIFT;
+ }
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags &= ~KBASE_REG_GROWABLE;
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return gpu_va;
+
+#ifdef CONFIG_64BIT
+no_cookie:
+#endif
+no_mmap:
+bad_handle:
+ kbase_gpu_vm_unlock(kctx);
+no_aliased_array:
+invalid_flags:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+ kfree(reg);
+no_reg:
+bad_size:
+bad_nents:
+bad_stride:
+bad_flags:
+ return 0;
+}
+
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+ void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+ u64 *flags)
+{
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(gpu_va);
+ KBASE_DEBUG_ASSERT(va_pages);
+ KBASE_DEBUG_ASSERT(flags);
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+ *flags |= BASE_MEM_SAME_VA;
+#endif
+
+ if (!kbase_check_import_flags(*flags)) {
+ dev_warn(kctx->kbdev->dev,
+ "kbase_mem_import called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ dev_warn(kctx->kbdev->dev,
+ "kbase_mem_import call required coherent mem when unavailable");
+ goto bad_flags;
+ }
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+ *flags &= ~BASE_MEM_COHERENT_SYSTEM;
+ }
+
+ if ((padding != 0) && (type != BASE_MEM_IMPORT_TYPE_UMM)) {
+ dev_warn(kctx->kbdev->dev,
+ "padding is only supported for UMM");
+ goto bad_flags;
+ }
+
+ switch (type) {
+#ifdef CONFIG_UMP
+ case BASE_MEM_IMPORT_TYPE_UMP: {
+ ump_secure_id id;
+
+ if (get_user(id, (ump_secure_id __user *)phandle))
+ reg = NULL;
+ else
+ reg = kbase_mem_from_ump(kctx, id, va_pages, flags);
+ }
+ break;
+#endif /* CONFIG_UMP */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case BASE_MEM_IMPORT_TYPE_UMM: {
+ int fd;
+
+ if (get_user(fd, (int __user *)phandle))
+ reg = NULL;
+ else
+ reg = kbase_mem_from_umm(kctx, fd, va_pages, flags,
+ padding);
+ }
+ break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+ case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
+ struct base_mem_import_user_buffer user_buffer;
+ void __user *uptr;
+
+ if (copy_from_user(&user_buffer, phandle,
+ sizeof(user_buffer))) {
+ reg = NULL;
+ } else {
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ uptr = compat_ptr(user_buffer.ptr);
+ else
+#endif
+ uptr = u64_to_user_ptr(user_buffer.ptr);
+
+ reg = kbase_mem_from_user_buffer(kctx,
+ (unsigned long)uptr, user_buffer.length,
+ va_pages, flags);
+ }
+ break;
+ }
+ default: {
+ reg = NULL;
+ break;
+ }
+ }
+
+ if (!reg)
+ goto no_reg;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* mmap needed to setup VA? */
+ if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
+ /* Bind to a cookie */
+ if (!kctx->cookies)
+ goto no_cookie;
+ /* return a cookie */
+ *gpu_va = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << *gpu_va);
+ BUG_ON(kctx->pending_regions[*gpu_va]);
+ kctx->pending_regions[*gpu_va] = reg;
+
+ /* relocate to correct base */
+ *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ *gpu_va <<= PAGE_SHIFT;
+
+ } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES) {
+ /* we control the VA, mmap now to the GPU */
+ if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
+ goto no_gpu_va;
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+ } else {
+ /* we control the VA, but nothing to mmap yet */
+ if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
+ goto no_gpu_va;
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+ }
+
+ /* clear out private flags */
+ *flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return 0;
+
+no_gpu_va:
+no_cookie:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+no_reg:
+bad_flags:
+ *gpu_va = 0;
+ *va_pages = 0;
+ *flags = 0;
+ return -ENOMEM;
+}
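+
+/*
+ * Hypothetical calling sketch (not part of the original driver), assuming an
+ * ioctl handler has already validated the user-supplied import request:
+ *
+ *   u64 gpu_va, va_pages;
+ *   u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD;
+ *   int err = kbase_mem_import(kctx, BASE_MEM_IMPORT_TYPE_UMM,
+ *           handle_uptr, 0, &gpu_va, &va_pages, &flags);
+ *
+ * where handle_uptr is a hypothetical void __user * whose payload depends on
+ * the import type: an int dma-buf fd for TYPE_UMM, a ump_secure_id for
+ * TYPE_UMP, and a struct base_mem_import_user_buffer for TYPE_USER_BUFFER,
+ * as handled by the switch above.
+ */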
+
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ struct tagged_addr *phy_pages;
+ u64 delta = new_pages - old_pages;
+ int ret = 0;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Map the new pages into the GPU */
+ phy_pages = kbase_get_gpu_phy_pages(reg);
+ ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
+ phy_pages + old_pages, delta, reg->flags);
+
+ return ret;
+}
+
+static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ u64 gpu_va_start = reg->start_pfn;
+
+ if (new_pages == old_pages)
+ /* Nothing to do */
+ return;
+
+ unmap_mapping_range(kctx->filp->f_inode->i_mapping,
+ (gpu_va_start + new_pages)<<PAGE_SHIFT,
+ (old_pages - new_pages)<<PAGE_SHIFT, 1);
+}
+
+static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ u64 delta = old_pages - new_pages;
+ int ret = 0;
+
+ ret = kbase_mmu_teardown_pages(kctx,
+ reg->start_pfn + new_pages, delta);
+
+ return ret;
+}
+
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
+{
+ u64 old_pages;
+ u64 delta;
+ int res = -EINVAL;
+ struct kbase_va_region *reg;
+ bool read_locked = false;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(gpu_addr != 0);
+
+ if (gpu_addr & ~PAGE_MASK) {
+ dev_warn(kctx->kbdev->dev, "kbase:mem_commit: gpu_addr: passed parameter is invalid");
+ return -EINVAL;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+
+ if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+ goto out_unlock;
+
+ if (0 == (reg->flags & KBASE_REG_GROWABLE))
+ goto out_unlock;
+
+ /* Would overflow the VA region */
+ if (new_pages > reg->nr_pages)
+ goto out_unlock;
+
+ /* can't be mapped more than once on the GPU */
+ if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
+ goto out_unlock;
+ /* can't grow regions which are ephemeral */
+ if (reg->flags & KBASE_REG_DONT_NEED)
+ goto out_unlock;
+
+ if (new_pages == reg->gpu_alloc->nents) {
+ /* no change */
+ res = 0;
+ goto out_unlock;
+ }
+
+ old_pages = kbase_reg_current_backed_size(reg);
+ if (new_pages > old_pages) {
+ delta = new_pages - old_pages;
+
+ /*
+ * No update to the mm so downgrade the writer lock to a read
+ * lock so other readers aren't blocked after this point.
+ */
+ downgrade_write(&current->mm->mmap_sem);
+ read_locked = true;
+
+ /* Allocate some more pages */
+ if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
+ res = -ENOMEM;
+ goto out_unlock;
+ }
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ reg->gpu_alloc, delta) != 0) {
+ res = -ENOMEM;
+ kbase_free_phy_pages_helper(reg->cpu_alloc,
+ delta);
+ goto out_unlock;
+ }
+ }
+
+ /* No update required for CPU mappings, that's done on fault. */
+
+ /* Update GPU mapping. */
+ res = kbase_mem_grow_gpu_mapping(kctx, reg,
+ new_pages, old_pages);
+
+ /* On error free the new pages */
+ if (res) {
+ kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ kbase_free_phy_pages_helper(reg->gpu_alloc,
+ delta);
+ res = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ delta = old_pages - new_pages;
+
+ /* Update all CPU mapping(s) */
+ kbase_mem_shrink_cpu_mapping(kctx, reg,
+ new_pages, old_pages);
+
+ /* Update the GPU mapping */
+ res = kbase_mem_shrink_gpu_mapping(kctx, reg,
+ new_pages, old_pages);
+ if (res) {
+ res = -ENOMEM;
+ goto out_unlock;
+ }
+
+ kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
+ }
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ if (read_locked)
+ up_read(&current->mm->mmap_sem);
+ else
+ up_write(&current->mm->mmap_sem);
+
+ return res;
+}
+
+static void kbase_cpu_vm_open(struct vm_area_struct *vma)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+ /* non-atomic as we're under Linux' mm lock */
+ map->count++;
+}
+
+static void kbase_cpu_vm_close(struct vm_area_struct *vma)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+
+ /* non-atomic as we're under Linux' mm lock */
+ if (--map->count)
+ return;
+
+ KBASE_DEBUG_ASSERT(map->kctx);
+ KBASE_DEBUG_ASSERT(map->alloc);
+
+ kbase_gpu_vm_lock(map->kctx);
+
+ if (map->free_on_close) {
+ KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
+ KBASE_REG_ZONE_SAME_VA);
+ /* Avoid freeing the memory on process death, which would result in a
+ * GPU page fault. The memory will be freed in kbase_destroy_context
+ * instead.
+ */
+ if (!(current->flags & PF_EXITING))
+ kbase_mem_free_region(map->kctx, map->region);
+ }
+
+ list_del(&map->mappings_list);
+
+ kbase_gpu_vm_unlock(map->kctx);
+
+ kbase_mem_phy_alloc_put(map->alloc);
+ kfree(map);
+}
+
+KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
+
+
+static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+ pgoff_t rel_pgoff;
+ size_t i;
+ pgoff_t addr;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+ KBASE_DEBUG_ASSERT(map->kctx);
+ KBASE_DEBUG_ASSERT(map->alloc);
+
+ rel_pgoff = vmf->pgoff - map->region->start_pfn;
+
+ kbase_gpu_vm_lock(map->kctx);
+ if (rel_pgoff >= map->alloc->nents)
+ goto locked_bad_fault;
+
+ /* Fault on access to DONT_NEED regions */
+ if (map->alloc->reg && (map->alloc->reg->flags & KBASE_REG_DONT_NEED))
+ goto locked_bad_fault;
+
+ /* insert all valid pages from the fault location */
+ i = rel_pgoff;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ addr = (pgoff_t)((uintptr_t)vmf->virtual_address >> PAGE_SHIFT);
+#else
+ addr = (pgoff_t)(vmf->address >> PAGE_SHIFT);
+#endif
+ while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
+ int ret = vm_insert_pfn(vma, addr << PAGE_SHIFT,
+ PFN_DOWN(as_phys_addr_t(map->alloc->pages[i])));
+ if (ret < 0 && ret != -EBUSY)
+ goto locked_bad_fault;
+
+ i++; addr++;
+ }
+
+ kbase_gpu_vm_unlock(map->kctx);
+ /* we resolved it, nothing for VM to do */
+ return VM_FAULT_NOPAGE;
+
+locked_bad_fault:
+ kbase_gpu_vm_unlock(map->kctx);
+ return VM_FAULT_SIGBUS;
+}
+
+const struct vm_operations_struct kbase_vm_ops = {
+ .open = kbase_cpu_vm_open,
+ .close = kbase_cpu_vm_close,
+ .fault = kbase_cpu_vm_fault
+};
+
+static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
+{
+ struct kbase_cpu_mapping *map;
+ struct tagged_addr *page_array;
+ int err = 0;
+ int i;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+
+ if (!map) {
+ WARN_ON(1);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * VM_DONTCOPY - don't make this mapping available in fork'ed processes
+ * VM_DONTEXPAND - disable mremap on this region
+ * VM_IO - disables paging
+ * VM_DONTDUMP - Don't include in core dumps (3.7 only)
+ * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
+ * This is needed to support using the dedicated and
+ * the OS based memory backends together.
+ */
+ /*
+ * This will need updating to propagate coherency flags
+ * See MIDBASE-1057
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+#else
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+ vma->vm_ops = &kbase_vm_ops;
+ vma->vm_private_data = map;
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
+ (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
+ /* We can't map vmalloc'd memory uncached.
+ * Other memory will have been returned from
+ * kbase_mem_pool which would be
+ * suitable for mapping uncached.
+ */
+ BUG_ON(kaddr);
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ if (!kaddr) {
+ unsigned long addr = vma->vm_start + aligned_offset;
+ u64 start_off = vma->vm_pgoff - reg->start_pfn +
+ (aligned_offset>>PAGE_SHIFT);
+
+ vma->vm_flags |= VM_PFNMAP;
+ for (i = 0; i < nr_pages; i++) {
+ phys_addr_t phys;
+
+ phys = as_phys_addr_t(page_array[i + start_off]);
+ err = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
+ if (WARN_ON(err))
+ break;
+
+ addr += PAGE_SIZE;
+ }
+ } else {
+ WARN_ON(aligned_offset);
+ /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
+ vma->vm_flags |= VM_MIXEDMAP;
+ /* vmalloc remapping is easy... */
+ err = remap_vmalloc_range(vma, kaddr, 0);
+ WARN_ON(err);
+ }
+
+ if (err) {
+ kfree(map);
+ goto out;
+ }
+
+ map->region = reg;
+ map->free_on_close = free_on_close;
+ map->kctx = reg->kctx;
+ map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ map->count = 1; /* start with one ref */
+
+ if (reg->flags & KBASE_REG_CPU_CACHED)
+ map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+ list_add(&map->mappings_list, &map->alloc->mappings);
+
+ out:
+ return err;
+}
+
+static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
+{
+ struct kbase_va_region *new_reg;
+ u32 nr_pages;
+ size_t size;
+ int err = 0;
+ u32 *tb;
+ int owns_tb = 1;
+
+ dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
+ size = (vma->vm_end - vma->vm_start);
+ nr_pages = size >> PAGE_SHIFT;
+
+ if (!kctx->jctx.tb) {
+ KBASE_DEBUG_ASSERT(0 != size);
+ tb = vmalloc_user(size);
+
+ if (NULL == tb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kbase_device_trace_buffer_install(kctx, tb, size);
+ if (err) {
+ vfree(tb);
+ goto out;
+ }
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *kaddr = kctx->jctx.tb;
+
+ new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
+ if (!new_reg) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_no_region;
+ }
+
+ new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
+ if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+ err = -ENOMEM;
+ new_reg->cpu_alloc = NULL;
+ WARN_ON(1);
+ goto out_no_alloc;
+ }
+
+ new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+ new_reg->cpu_alloc->imported.kctx = kctx;
+ new_reg->flags &= ~KBASE_REG_FREE;
+ new_reg->flags |= KBASE_REG_CPU_CACHED;
+
+ /* alloc now owns the tb */
+ owns_tb = 0;
+
+ if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_no_va_region;
+ }
+
+ *reg = new_reg;
+
+ /* map read only, noexec */
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+ /* the rest of the flags are added by the cpu_mmap handler */
+
+ dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
+ return 0;
+
+out_no_va_region:
+out_no_alloc:
+ kbase_free_alloced_region(new_reg);
+out_no_region:
+ if (owns_tb) {
+ kbase_device_trace_buffer_uninstall(kctx);
+ vfree(tb);
+ }
+out:
+ return err;
+}
+
+static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
+{
+ struct kbase_va_region *new_reg;
+ void *kaddr;
+ u32 nr_pages;
+ size_t size;
+ int err = 0;
+
+ dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+ size = (vma->vm_end - vma->vm_start);
+ nr_pages = size >> PAGE_SHIFT;
+
+ kaddr = kbase_mmu_dump(kctx, nr_pages);
+
+ if (!kaddr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
+ if (!new_reg) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out;
+ }
+
+ new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
+ if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+ err = -ENOMEM;
+ new_reg->cpu_alloc = NULL;
+ WARN_ON(1);
+ goto out_no_alloc;
+ }
+
+ new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+ new_reg->flags &= ~KBASE_REG_FREE;
+ new_reg->flags |= KBASE_REG_CPU_CACHED;
+ if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_va_region;
+ }
+
+ *kmap_addr = kaddr;
+ *reg = new_reg;
+
+ dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+ return 0;
+
+out_no_alloc:
+out_va_region:
+ kbase_free_alloced_region(new_reg);
+out:
+ return err;
+}
+
+
+void kbase_os_mem_map_lock(struct kbase_context *kctx)
+{
+ struct mm_struct *mm = current->mm;
+ (void)kctx;
+ down_read(&mm->mmap_sem);
+}
+
+void kbase_os_mem_map_unlock(struct kbase_context *kctx)
+{
+ struct mm_struct *mm = current->mm;
+ (void)kctx;
+ up_read(&mm->mmap_sem);
+}
+
+static int kbasep_reg_mmap(struct kbase_context *kctx,
+ struct vm_area_struct *vma,
+ struct kbase_va_region **regm,
+ size_t *nr_pages, size_t *aligned_offset)
+
+{
+ int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ struct kbase_va_region *reg;
+ int err = 0;
+
+ *aligned_offset = 0;
+
+ dev_dbg(kctx->kbdev->dev, "in kbasep_reg_mmap\n");
+
+ /* SAME_VA stuff, fetch the right region */
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if ((reg->flags & KBASE_REG_GPU_NX) && (reg->nr_pages != *nr_pages)) {
+ /* incorrect mmap size */
+ /* leave the cookie for a potential later
+ * mapping, or to be reclaimed later when the
+ * context is freed */
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if ((vma->vm_flags & VM_READ && !(reg->flags & KBASE_REG_CPU_RD)) ||
+ (vma->vm_flags & VM_WRITE && !(reg->flags & KBASE_REG_CPU_WR))) {
+ /* VM flags inconsistent with region flags */
+ err = -EPERM;
+ dev_err(kctx->kbdev->dev, "%s:%d inconsistent VM flags\n",
+ __FILE__, __LINE__);
+ goto out;
+ }
+
+ /* adjust down nr_pages to what we have physically */
+ *nr_pages = kbase_reg_current_backed_size(reg);
+
+ if (kbase_gpu_mmap(kctx, reg, vma->vm_start + *aligned_offset,
+ reg->nr_pages, 1) != 0) {
+ dev_err(kctx->kbdev->dev, "%s:%d\n", __FILE__, __LINE__);
+ /* Unable to map in GPU space. */
+ WARN_ON(1);
+ err = -ENOMEM;
+ goto out;
+ }
+ /* no need for the cookie anymore */
+ kctx->pending_regions[cookie] = NULL;
+ kctx->cookies |= (1UL << cookie);
+
+ /*
+ * Overwrite the offset with the region start_pfn, so we effectively
+ * map from offset 0 in the region. However subtract the aligned
+ * offset so that when user space trims the mapping the beginning of
+ * the trimmed VMA has the correct vm_pgoff.
+ */
+ vma->vm_pgoff = reg->start_pfn - ((*aligned_offset)>>PAGE_SHIFT);
+out:
+ *regm = reg;
+ dev_dbg(kctx->kbdev->dev, "kbasep_reg_mmap done\n");
+
+ return err;
+}
+
+int kbase_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct kbase_context *kctx = file->private_data;
+ struct kbase_va_region *reg = NULL;
+ void *kaddr = NULL;
+ size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int err = 0;
+ int free_on_close = 0;
+ struct device *dev = kctx->kbdev->dev;
+ size_t aligned_offset = 0;
+
+ dev_dbg(dev, "kbase_mmap\n");
+
+ /* strip away corresponding VM_MAY% flags to the VM_% flags requested */
+ vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
+
+ if (0 == nr_pages) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
+ /* The non-mapped tracking helper page */
+ err = kbase_tracking_page_setup(kctx, vma);
+ goto out_unlock;
+ }
+
+ /* if not the MTP, verify that the MTP has been mapped */
+ rcu_read_lock();
+ /* catches both the case where the special page isn't present
+ * and the case where we've forked */
+ if (rcu_dereference(kctx->process_mm) != current->mm) {
+ err = -EINVAL;
+ rcu_read_unlock();
+ goto out_unlock;
+ }
+ rcu_read_unlock();
+
+ switch (vma->vm_pgoff) {
+ case PFN_DOWN(BASEP_MEM_INVALID_HANDLE):
+ case PFN_DOWN(BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE):
+ /* Illegal handle for direct map */
+ err = -EINVAL;
+ goto out_unlock;
+ case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
+ err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
+ if (0 != err)
+ goto out_unlock;
+ dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
+ /* free the region on munmap */
+ free_on_close = 1;
+ break;
+ case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+ /* MMU dump */
+ err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
+ if (0 != err)
+ goto out_unlock;
+ /* free the region on munmap */
+ free_on_close = 1;
+ break;
+ case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
+ PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
+ err = kbasep_reg_mmap(kctx, vma, &reg, &nr_pages,
+ &aligned_offset);
+ if (0 != err)
+ goto out_unlock;
+ /* free the region on munmap */
+ free_on_close = 1;
+ break;
+ }
+ default: {
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+ (u64)vma->vm_pgoff << PAGE_SHIFT);
+
+ if (reg && !(reg->flags & KBASE_REG_FREE)) {
+ /* will this mapping overflow the size of the region? */
+ if (nr_pages > (reg->nr_pages -
+ (vma->vm_pgoff - reg->start_pfn))) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if ((vma->vm_flags & VM_READ &&
+ !(reg->flags & KBASE_REG_CPU_RD)) ||
+ (vma->vm_flags & VM_WRITE &&
+ !(reg->flags & KBASE_REG_CPU_WR))) {
+ /* VM flags inconsistent with region flags */
+ err = -EPERM;
+ dev_err(dev, "%s:%d inconsistent VM flags\n",
+ __FILE__, __LINE__);
+ goto out_unlock;
+ }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ if (KBASE_MEM_TYPE_IMPORTED_UMM ==
+ reg->cpu_alloc->type) {
+ err = dma_buf_mmap(
+ reg->cpu_alloc->imported.umm.dma_buf,
+ vma, vma->vm_pgoff - reg->start_pfn);
+ goto out_unlock;
+ }
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+ /* limit what we map to the amount currently backed */
+ if (reg->cpu_alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) {
+ if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents)
+ nr_pages = 0;
+ else
+ nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn);
+ }
+ } else {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+ } /* default */
+ } /* switch */
+
+ err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
+
+ if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
+ /* MMU dump - userspace should now have a reference on
+ * the pages, so we can free the kernel mapping */
+ vfree(kaddr);
+ }
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+out:
+ if (err)
+ dev_err(dev, "mmap failed %d\n", err);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmap);
+
+static void kbasep_sync_mem_regions(struct kbase_context *kctx,
+ struct kbase_vmap_struct *map, enum kbase_sync_type dest)
+{
+ size_t i;
+ off_t const offset = (uintptr_t)map->gpu_addr & ~PAGE_MASK;
+ size_t const page_count = PFN_UP(offset + map->size);
+
+ /* Sync first page */
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), map->size);
+ struct tagged_addr cpu_pa = map->cpu_pages[0];
+ struct tagged_addr gpu_pa = map->gpu_pages[0];
+
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, dest);
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ cpu_pa = map->cpu_pages[i];
+ gpu_pa = map->gpu_pages[i];
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, dest);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1) {
+ cpu_pa = map->cpu_pages[page_count - 1];
+ gpu_pa = map->gpu_pages[page_count - 1];
+ sz = ((offset + map->size - 1) & ~PAGE_MASK) + 1;
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, dest);
+ }
+}
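+
+/*
+ * Worked example for the helper above (not part of the original driver):
+ * with a 4 KiB PAGE_SIZE, a map->gpu_addr offset of 0x100 into its first
+ * page and a map->size of 0x2000 give page_count = PFN_UP(0x2100) = 3. The
+ * first page is synced for 0xF00 bytes starting at offset 0x100, the single
+ * middle page in full, and the last page for
+ * ((0x100 + 0x2000 - 1) & ~PAGE_MASK) + 1 = 0x100 bytes from offset 0,
+ * covering the 0x2000 bytes exactly once.
+ */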
+
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ unsigned long prot_request, struct kbase_vmap_struct *map)
+{
+ struct kbase_va_region *reg;
+ unsigned long page_index;
+ unsigned int offset = gpu_addr & ~PAGE_MASK;
+ size_t page_count = PFN_UP(offset + size);
+ struct tagged_addr *page_array;
+ struct page **pages;
+ void *cpu_addr = NULL;
+ pgprot_t prot;
+ size_t i;
+
+ if (!size || !map)
+ return NULL;
+
+ /* check if page_count calculation will wrap */
+ if (size > ((size_t)-1 / PAGE_SIZE))
+ return NULL;
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn;
+
+ /* check if page_index + page_count will wrap */
+ if (-1UL - page_count < page_index)
+ goto out_unlock;
+
+ if (page_index + page_count > kbase_reg_current_backed_size(reg))
+ goto out_unlock;
+
+ if (reg->flags & KBASE_REG_DONT_NEED)
+ goto out_unlock;
+
+ /* check access permissions can be satisfied
+ * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR} */
+ if ((reg->flags & prot_request) != prot_request)
+ goto out_unlock;
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+ if (!page_array)
+ goto out_unlock;
+
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_unlock;
+
+ for (i = 0; i < page_count; i++)
+ pages[i] = phys_to_page(as_phys_addr_t(page_array[page_index +
+ i]));
+
+ prot = PAGE_KERNEL;
+ if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+ /* Map uncached */
+ prot = pgprot_writecombine(prot);
+ }
+ /* Note: enforcing a RO prot_request onto prot is not done, since:
+ * - CPU-arch-specific integration required
+ * - kbase_vmap() requires no access checks to be made/enforced */
+
+ cpu_addr = vmap(pages, page_count, VM_MAP, prot);
+
+ kfree(pages);
+
+ if (!cpu_addr)
+ goto out_unlock;
+
+ map->gpu_addr = gpu_addr;
+ map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
+ map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
+ map->addr = (void *)((uintptr_t)cpu_addr + offset);
+ map->size = size;
+ map->sync_needed = ((reg->flags & KBASE_REG_CPU_CACHED) != 0) &&
+ !kbase_mem_is_imported(map->gpu_alloc->type);
+
+ if (map->sync_needed)
+ kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU);
+ kbase_gpu_vm_unlock(kctx);
+
+ return map->addr;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return NULL;
+}
+
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map)
+{
+ /* 0 is specified for prot_request to indicate no access checks should
+ * be made.
+ *
+ * As mentioned in kbase_vmap_prot() this means that a kernel-side
+ * CPU-RO mapping is not enforced to allow this to work */
+ return kbase_vmap_prot(kctx, gpu_addr, size, 0u, map);
+}
+KBASE_EXPORT_TEST_API(kbase_vmap);
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+ void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
+ vunmap(addr);
+
+ if (map->sync_needed)
+ kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE);
+ map->gpu_addr = 0;
+ map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
+ map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+ map->cpu_pages = NULL;
+ map->gpu_pages = NULL;
+ map->addr = NULL;
+ map->size = 0;
+ map->sync_needed = false;
+}
+KBASE_EXPORT_TEST_API(kbase_vunmap);
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
+{
+ struct mm_struct *mm;
+
+ rcu_read_lock();
+ mm = rcu_dereference(kctx->process_mm);
+ if (mm) {
+ atomic_add(pages, &kctx->nonmapped_pages);
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_FILEPAGES, pages);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_FILEPAGES, pages);
+ spin_unlock(&mm->page_table_lock);
+#endif
+ }
+ rcu_read_unlock();
+}
+
+static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
+{
+ int pages;
+ struct mm_struct *mm;
+
+ spin_lock(&kctx->mm_update_lock);
+ mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
+ if (!mm) {
+ spin_unlock(&kctx->mm_update_lock);
+ return;
+ }
+
+ rcu_assign_pointer(kctx->process_mm, NULL);
+ spin_unlock(&kctx->mm_update_lock);
+ synchronize_rcu();
+
+ pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_FILEPAGES, -pages);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_FILEPAGES, -pages);
+ spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+static void kbase_special_vm_close(struct vm_area_struct *vma)
+{
+ struct kbase_context *kctx;
+
+ kctx = vma->vm_private_data;
+ kbasep_os_process_page_usage_drain(kctx);
+}
+
+static const struct vm_operations_struct kbase_vm_special_ops = {
+ .close = kbase_special_vm_close,
+};
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
+{
+ /* check that this is the only tracking page */
+ spin_lock(&kctx->mm_update_lock);
+ if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
+ spin_unlock(&kctx->mm_update_lock);
+ return -EFAULT;
+ }
+
+ rcu_assign_pointer(kctx->process_mm, current->mm);
+
+ spin_unlock(&kctx->mm_update_lock);
+
+ /* no real access */
+ vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+#else
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+ vma->vm_ops = &kbase_vm_special_ops;
+ vma->vm_private_data = kctx;
+
+ return 0;
+}
+
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle)
+{
+ int i;
+ int res;
+ void *va;
+ dma_addr_t dma_pa;
+ struct kbase_va_region *reg;
+ struct tagged_addr *page_array;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ DEFINE_DMA_ATTRS(attrs);
+#endif
+
+ u32 pages = ((size - 1) >> PAGE_SHIFT) + 1;
+ u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(0 != size);
+ KBASE_DEBUG_ASSERT(0 != pages);
+
+ if (size == 0)
+ goto err;
+
+ /* All the alloc calls return zeroed memory */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL,
+ attrs);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL,
+ &attrs);
+#else
+ va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL);
+#endif
+ if (!va)
+ goto err;
+
+ /* Store the state so we can free it later. */
+ handle->cpu_va = va;
+ handle->dma_pa = dma_pa;
+ handle->size = size;
+
+
+ reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
+ if (!reg)
+ goto no_reg;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ if (kbase_update_region_flags(kctx, reg, flags) != 0)
+ goto invalid_flags;
+
+ reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
+ if (IS_ERR_OR_NULL(reg->cpu_alloc))
+ goto no_alloc;
+
+ reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+
+ for (i = 0; i < pages; i++)
+ page_array[i] = as_tagged(dma_pa + (i << PAGE_SHIFT));
+
+ reg->cpu_alloc->nents = pages;
+
+ kbase_gpu_vm_lock(kctx);
+ res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1);
+ kbase_gpu_vm_unlock(kctx);
+ if (res)
+ goto no_mmap;
+
+ return va;
+
+no_mmap:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc:
+invalid_flags:
+ kfree(reg);
+no_reg:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, attrs);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
+#else
+ dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
+#endif
+err:
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_va_alloc);
+
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle)
+{
+ struct kbase_va_region *reg;
+ int err;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+ DEFINE_DMA_ATTRS(attrs);
+#endif
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(handle->cpu_va != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+ reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va);
+ KBASE_DEBUG_ASSERT(reg);
+ err = kbase_gpu_munmap(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+ KBASE_DEBUG_ASSERT(!err);
+
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa, DMA_ATTR_WRITE_COMBINE);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ dma_free_attrs(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa, &attrs);
+#else
+ dma_free_writecombine(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa);
+#endif
+}
+KBASE_EXPORT_SYMBOL(kbase_va_free);
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_linux.h b/drivers/gpu/arm_gpu/mali_kbase_mem_linux.h
new file mode 100644
index 000000000000..db35f62a7431
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_linux.h
@@ -0,0 +1,240 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_linux.h
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_MEM_LINUX_H_
+#define _KBASE_MEM_LINUX_H_
+
+/** A HWC dump mapping */
+struct kbase_hwc_dma_mapping {
+ void *cpu_va;
+ dma_addr_t dma_pa;
+ size_t size;
+};
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+ u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+ u64 *gpu_va);
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+ void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+ u64 *flags);
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
+
+/**
+ * kbase_mem_commit - Change the physical backing size of a region
+ *
+ * @kctx: The kernel context
+ * @gpu_addr: Handle to the memory region
+ * @new_pages: Number of physical pages to back the region with
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
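+
+/*
+ * Usage sketch (not part of the original driver), assuming gpu_addr refers
+ * to a growable native region returned by an earlier allocation:
+ *
+ *   int err = kbase_mem_commit(kctx, gpu_addr, 256);
+ *   if (err)
+ *       dev_warn(kctx->kbdev->dev, "commit failed: %d\n", err);
+ *
+ * The implementation returns -EINVAL for non-growable, non-native or
+ * multiply GPU-mapped regions, and -ENOMEM if the extra pages cannot be
+ * allocated or mapped.
+ */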
+
+int kbase_mmap(struct file *file, struct vm_area_struct *vma);
+
+/**
+ * kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to initialize.
+ *
+ * Return: Zero on success or -errno on failure.
+ */
+int kbase_mem_evictable_init(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_evictable_deinit - De-initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to de-initialize.
+ */
+void kbase_mem_evictable_deinit(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region
+ * @new_pages: The number of pages after the grow
+ * @old_pages: The number of pages before the grow
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Expand the GPU mapping to encompass the new physical pages which have
+ * been added to the allocation.
+ *
+ * Note: Caller must be holding the region lock.
+ */
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
+ * @gpu_alloc: The physical allocation to make evictable
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Take the provided region and make all the physical pages within it
+ * reclaimable by the kernel, updating the per-process VM stats as well.
+ * Remove any CPU mappings (as these can't be removed in the shrinker callback
+ * because mmap_sem might already be taken) but leave the GPU mapping intact
+ * until the shrinker reclaims the allocation.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);
+
+/**
+ * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility for
+ * eviction.
+ * @alloc: The physical allocation to remove eviction eligibility from.
+ *
+ * Return: True if the allocation had its backing restored and false if
+ * it did not.
+ *
+ * Make the physical pages in the region no longer reclaimable and update the
+ * per-process stats. If the shrinker has already evicted the memory, then
+ * re-allocate it, provided the region is still alive.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
+
+struct kbase_vmap_struct {
+ u64 gpu_addr;
+ struct kbase_mem_phy_alloc *cpu_alloc;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+ struct tagged_addr *cpu_pages;
+ struct tagged_addr *gpu_pages;
+ void *addr;
+ size_t size;
+ bool sync_needed;
+};
+
+
+/**
+ * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
+ * requested access permissions are supported
+ * @kctx: Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size: Size of VA range
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until kbase_vunmap() is called.
+ *
+ * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check
+ * whether the region should allow the intended access, and return an error if
+ * disallowed. This is essential for security of imported memory, particularly
+ * a user buf from SHM mapped into the process as RO. In that case, write
+ * access must be checked if the intention is for kernel to write to the
+ * memory.
+ *
+ * The checks are also there to help catch access errors on memory where
+ * security is not a concern: imported memory that is always RW, and memory
+ * that was allocated and owned by the process attached to @kctx. In this case,
+ * it helps to identify memory that was mapped with the wrong access type.
+ *
+ * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
+ * where either the security of memory is solely dependent on those flags, or
+ * when userspace code was expecting only the GPU to access the memory (e.g. HW
+ * workarounds).
+ *
+ * All cache maintenance operations shall be ignored if the
+ * memory region has been imported.
+ *
+ */
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ unsigned long prot_request, struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vmap - Map a GPU VA range into the kernel safely
+ * @kctx: Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size: Size of VA range
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until kbase_vunmap() is called.
+ *
+ * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
+ * checks to ensure the security of e.g. imported user bufs from RO SHM.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vunmap - Unmap a GPU VA range from the kernel
+ * @kctx: Context the VA range belongs to
+ * @map: Structure describing the mapping from the corresponding kbase_vmap()
+ * call
+ *
+ * Unmaps a GPU VA range from the kernel, given its @map structure obtained
+ * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * The reference taken on pages during kbase_vmap() is released.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);
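+
+/*
+ * Usage sketch (not part of the original driver), assuming gpu_addr and size
+ * describe a CPU-readable range within an existing region:
+ *
+ *   struct kbase_vmap_struct map;
+ *   u32 *ptr = kbase_vmap_prot(kctx, gpu_addr, size, KBASE_REG_CPU_RD, &map);
+ *
+ *   if (ptr) {
+ *       u32 first_word = ptr[0];
+ *
+ *       kbase_vunmap(kctx, &map);
+ *   }
+ *
+ * Passing KBASE_REG_CPU_RD requests a read-access check against the region
+ * flags; kbase_vmap() performs no such check, as described above.
+ */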
+
+/** @brief Allocate memory from kernel space and map it onto the GPU
+ *
+ * @param kctx The context used for the allocation/mapping
+ * @param size The size of the allocation in bytes
+ * @param handle An opaque structure used to contain the state needed to free the memory
+ * @return the VA for kernel space and GPU MMU
+ */
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle);
+
+/** @brief Free/unmap memory allocated by kbase_va_alloc
+ *
+ * @param kctx The context used for the allocation/mapping
+ * @param handle An opaque structure returned by the kbase_va_alloc function.
+ */
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle);
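+
+/*
+ * Usage sketch for the pair above (not part of the original driver):
+ *
+ *   struct kbase_hwc_dma_mapping handle;
+ *   void *cpu_va = kbase_va_alloc(kctx, 4096, &handle);
+ *
+ *   if (cpu_va) {
+ *       memset(cpu_va, 0, 4096);
+ *       kbase_va_free(kctx, &handle);
+ *   }
+ *
+ * The returned virtual address is also the GPU VA for the region, and the
+ * write-combined DMA buffer is released again by kbase_va_free().
+ */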
+
+extern const struct vm_operations_struct kbase_vm_ops;
+
+#endif /* _KBASE_MEM_LINUX_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm_gpu/mali_kbase_mem_lowlevel.h
new file mode 100644
index 000000000000..f4e88491327e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_lowlevel.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_MEM_LOWLEVEL_H
+#define _KBASE_MEM_LOWLEVEL_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/dma-mapping.h>
+
+/**
+ * @brief Flags for kbase_phy_allocator_pages_alloc
+ */
+#define KBASE_PHY_PAGES_FLAG_DEFAULT (0) /** Default allocation flag */
+#define KBASE_PHY_PAGES_FLAG_CLEAR (1 << 0) /** Clear the pages after allocation */
+#define KBASE_PHY_PAGES_FLAG_POISON (1 << 1) /** Fill the memory with a poison value */
+
+#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)
+
+#define KBASE_PHY_PAGES_POISON_VALUE 0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
+
+enum kbase_sync_type {
+ KBASE_SYNC_TO_CPU,
+ KBASE_SYNC_TO_DEVICE
+};
+
+struct tagged_addr { phys_addr_t tagged_addr; };
+
+#define HUGE_PAGE (1u << 0)
+#define HUGE_HEAD (1u << 1)
+#define FROM_PARTIAL (1u << 2)
+
+static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
+{
+ return t.tagged_addr & PAGE_MASK;
+}
+
+static inline struct tagged_addr as_tagged(phys_addr_t phys)
+{
+ struct tagged_addr t;
+
+ t.tagged_addr = phys & PAGE_MASK;
+ return t;
+}
+
+static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
+{
+ struct tagged_addr t;
+
+ t.tagged_addr = (phys & PAGE_MASK) | (tag & ~PAGE_MASK);
+ return t;
+}
+
+static inline bool is_huge(struct tagged_addr t)
+{
+ return t.tagged_addr & HUGE_PAGE;
+}
+
+static inline bool is_huge_head(struct tagged_addr t)
+{
+ int mask = HUGE_HEAD | HUGE_PAGE;
+
+ return mask == (t.tagged_addr & mask);
+}
+
+static inline bool is_partial(struct tagged_addr t)
+{
+ return t.tagged_addr & FROM_PARTIAL;
+}
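+
+/*
+ * Example (not part of the original driver): tagging the head page of a
+ * huge-page allocation, where phys is assumed to be a page-aligned
+ * phys_addr_t:
+ *
+ *   struct tagged_addr t = as_tagged_tag(phys, HUGE_HEAD | HUGE_PAGE);
+ *
+ *   is_huge(t)          evaluates to true
+ *   is_huge_head(t)     evaluates to true
+ *   is_partial(t)       evaluates to false
+ *   as_phys_addr_t(t)   returns phys with the tag bits masked off
+ */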
+
+#endif /* _KBASE_MEM_LOWLEVEL_H */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_pool.c b/drivers/gpu/arm_gpu/mali_kbase_mem_pool.c
new file mode 100644
index 000000000000..696730ac5b2b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_pool.c
@@ -0,0 +1,651 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/atomic.h>
+#include <linux/version.h>
+
+#define pool_dbg(pool, format, ...) \
+ dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
+ (pool->next_pool) ? "kctx" : "kbdev", \
+ kbase_mem_pool_size(pool), \
+ kbase_mem_pool_max_size(pool), \
+ ##__VA_ARGS__)
+
+#define NOT_DIRTY false
+#define NOT_RECLAIMED false
+
+static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
+{
+ spin_lock(&pool->pool_lock);
+}
+
+static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
+{
+ spin_unlock(&pool->pool_lock);
+}
+
+static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
+{
+ ssize_t max_size = kbase_mem_pool_max_size(pool);
+ ssize_t cur_size = kbase_mem_pool_size(pool);
+
+ return max(max_size - cur_size, (ssize_t)0);
+}
+
+static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
+{
+ return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
+}
+
+static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
+{
+ return kbase_mem_pool_size(pool) == 0;
+}
+
+static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ lockdep_assert_held(&pool->pool_lock);
+
+ list_add(&p->lru, &pool->page_list);
+ pool->cur_size++;
+
+ zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
+
+ pool_dbg(pool, "added page\n");
+}
+
+static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
+{
+ kbase_mem_pool_lock(pool);
+ kbase_mem_pool_add_locked(pool, p);
+ kbase_mem_pool_unlock(pool);
+}
+
+static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
+ struct list_head *page_list, size_t nr_pages)
+{
+ struct page *p;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ list_for_each_entry(p, page_list, lru) {
+ zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
+ }
+
+ list_splice(page_list, &pool->page_list);
+ pool->cur_size += nr_pages;
+
+ pool_dbg(pool, "added %zu pages\n", nr_pages);
+}
+
+static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
+ struct list_head *page_list, size_t nr_pages)
+{
+ kbase_mem_pool_lock(pool);
+ kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
+ kbase_mem_pool_unlock(pool);
+}
+
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ if (kbase_mem_pool_is_empty(pool))
+ return NULL;
+
+ p = list_first_entry(&pool->page_list, struct page, lru);
+ list_del_init(&p->lru);
+ pool->cur_size--;
+
+ zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);
+
+ pool_dbg(pool, "removed page\n");
+
+ return p;
+}
+
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ kbase_mem_pool_lock(pool);
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_unlock(pool);
+
+ return p;
+}
+
+static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ struct device *dev = pool->kbdev->dev;
+ dma_sync_single_for_device(dev, kbase_dma_addr(p),
+ (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+}
+
+static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ int i;
+
+ for (i = 0; i < (1U << pool->order); i++)
+ clear_highpage(p+i);
+
+ kbase_mem_pool_sync_page(pool, p);
+}
+
+static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
+ struct page *p)
+{
+ /* Zero page before spilling */
+ kbase_mem_pool_zero_page(next_pool, p);
+
+ kbase_mem_pool_add(next_pool, p);
+}
+
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+ gfp_t gfp;
+ struct device *dev = pool->kbdev->dev;
+ dma_addr_t dma_addr;
+ int i;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+ /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+ gfp = GFP_USER | __GFP_ZERO;
+#else
+ gfp = GFP_HIGHUSER | __GFP_ZERO;
+#endif
+
+ if (current->flags & PF_KTHREAD) {
+ /* Don't trigger OOM killer from kernel threads, e.g. when
+ * growing memory on GPU page fault */
+ gfp |= __GFP_NORETRY;
+ }
+
+ /* don't warn on higher-order allocation failures */
+ if (pool->order)
+ gfp |= __GFP_NOWARN;
+
+ p = alloc_pages(gfp, pool->order);
+ if (!p)
+ return NULL;
+
+ dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ __free_pages(p, pool->order);
+ return NULL;
+ }
+
+ WARN_ON(dma_addr != page_to_phys(p));
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+
+ return p;
+}
+
+static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ struct device *dev = pool->kbdev->dev;
+ dma_addr_t dma_addr = kbase_dma_addr(p);
+ int i;
+
+ dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
+ DMA_BIDIRECTIONAL);
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_clear_dma_addr(p+i);
+ __free_pages(p, pool->order);
+
+ pool_dbg(pool, "freed page to kernel\n");
+}
+
+static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
+ size_t nr_to_shrink)
+{
+ struct page *p;
+ size_t i;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_free_page(pool, p);
+ }
+
+ return i;
+}
+
+static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
+ size_t nr_to_shrink)
+{
+ size_t nr_freed;
+
+ kbase_mem_pool_lock(pool);
+ nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+ kbase_mem_pool_unlock(pool);
+
+ return nr_freed;
+}
+
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
+ size_t nr_to_grow)
+{
+ struct page *p;
+ size_t i;
+
+ for (i = 0; i < nr_to_grow; i++) {
+ p = kbase_mem_alloc_page(pool);
+ if (!p)
+ return -ENOMEM;
+ kbase_mem_pool_add(pool, p);
+ }
+
+ return 0;
+}
+
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
+{
+ size_t cur_size;
+ int err = 0;
+
+ cur_size = kbase_mem_pool_size(pool);
+
+ if (new_size > pool->max_size)
+ new_size = pool->max_size;
+
+ if (new_size < cur_size)
+ kbase_mem_pool_shrink(pool, cur_size - new_size);
+ else if (new_size > cur_size)
+ err = kbase_mem_pool_grow(pool, new_size - cur_size);
+
+ if (err) {
+ size_t grown_size = kbase_mem_pool_size(pool);
+
+ dev_warn(pool->kbdev->dev,
+ "Mem pool not grown to the required size of %zu bytes, grown for additional %zu bytes instead!\n",
+ (new_size - cur_size), (grown_size - cur_size));
+ }
+}
+
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
+{
+ size_t cur_size;
+ size_t nr_to_shrink;
+
+ kbase_mem_pool_lock(pool);
+
+ pool->max_size = max_size;
+
+ cur_size = kbase_mem_pool_size(pool);
+ if (max_size < cur_size) {
+ nr_to_shrink = cur_size - max_size;
+ kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+ }
+
+ kbase_mem_pool_unlock(pool);
+}
+
+
+static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_mem_pool *pool;
+
+ pool = container_of(s, struct kbase_mem_pool, reclaim);
+ pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
+ return kbase_mem_pool_size(pool);
+}
+
+static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_mem_pool *pool;
+ unsigned long freed;
+
+ pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+ pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
+
+ freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);
+
+ pool_dbg(pool, "reclaim freed %ld pages\n", freed);
+
+ return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ if (sc->nr_to_scan == 0)
+ return kbase_mem_pool_reclaim_count_objects(s, sc);
+
+ return kbase_mem_pool_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+ size_t max_size,
+ size_t order,
+ struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool)
+{
+ pool->cur_size = 0;
+ pool->max_size = max_size;
+ pool->order = order;
+ pool->kbdev = kbdev;
+ pool->next_pool = next_pool;
+
+ spin_lock_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->page_list);
+
+ /* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
+#else
+ pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
+ pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
+#endif
+ pool->reclaim.seeks = DEFAULT_SEEKS;
+ /* Kernel versions prior to 3.1: struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+ pool->reclaim.batch = 0;
+#endif
+ register_shrinker(&pool->reclaim);
+
+ pool_dbg(pool, "initialized\n");
+
+ return 0;
+}
+
+void kbase_mem_pool_term(struct kbase_mem_pool *pool)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+ struct page *p;
+ size_t nr_to_spill = 0;
+ LIST_HEAD(spill_list);
+ int i;
+
+ pool_dbg(pool, "terminate()\n");
+
+ unregister_shrinker(&pool->reclaim);
+
+ kbase_mem_pool_lock(pool);
+ pool->max_size = 0;
+
+ if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+ /* Spill to next pool (may overspill) */
+ nr_to_spill = kbase_mem_pool_capacity(next_pool);
+ nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
+
+ /* Zero pages first without holding the next_pool lock */
+ for (i = 0; i < nr_to_spill; i++) {
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_zero_page(pool, p);
+ list_add(&p->lru, &spill_list);
+ }
+ }
+
+ while (!kbase_mem_pool_is_empty(pool)) {
+ /* Free remaining pages to kernel */
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_free_page(pool, p);
+ }
+
+ kbase_mem_pool_unlock(pool);
+
+ if (next_pool && nr_to_spill) {
+ /* Add new page list to next_pool */
+ kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
+
+ pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
+ }
+
+ pool_dbg(pool, "terminated\n");
+}
+
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ do {
+ pool_dbg(pool, "alloc()\n");
+ p = kbase_mem_pool_remove(pool);
+
+ if (p)
+ return p;
+
+ pool = pool->next_pool;
+ } while (pool);
+
+ return NULL;
+}
+
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
+ bool dirty)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+
+ pool_dbg(pool, "free()\n");
+
+ if (!kbase_mem_pool_is_full(pool)) {
+ /* Add to our own pool */
+ if (dirty)
+ kbase_mem_pool_sync_page(pool, p);
+
+ kbase_mem_pool_add(pool, p);
+ } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+ /* Spill to next pool */
+ kbase_mem_pool_spill(next_pool, p);
+ } else {
+ /* Free page */
+ kbase_mem_pool_free_page(pool, p);
+ }
+}
+
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
+ struct tagged_addr *pages, bool partial_allowed)
+{
+ struct page *p;
+ size_t nr_from_pool;
+ size_t i = 0;
+ int err = -ENOMEM;
+ size_t nr_pages_internal;
+
+ nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+ if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+ return -EINVAL;
+
+ pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages);
+ pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal);
+
+ /* Get pages from this pool */
+ kbase_mem_pool_lock(pool);
+ nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+ while (nr_from_pool--) {
+ int j;
+ p = kbase_mem_pool_remove_locked(pool);
+ if (pool->order) {
+ pages[i++] = as_tagged_tag(page_to_phys(p),
+ HUGE_HEAD | HUGE_PAGE);
+ for (j = 1; j < (1u << pool->order); j++)
+ pages[i++] = as_tagged_tag(page_to_phys(p) +
+ PAGE_SIZE * j,
+ HUGE_PAGE);
+ } else {
+ pages[i++] = as_tagged(page_to_phys(p));
+ }
+ }
+ kbase_mem_pool_unlock(pool);
+
+ if (i != nr_4k_pages && pool->next_pool) {
+ /* Allocate via next pool */
+ err = kbase_mem_pool_alloc_pages(pool->next_pool,
+ nr_4k_pages - i, pages + i, partial_allowed);
+
+ if (err < 0)
+ goto err_rollback;
+
+ i += err;
+ } else {
+ /* Get any remaining pages from kernel */
+ while (i != nr_4k_pages) {
+ p = kbase_mem_alloc_page(pool);
+ if (!p) {
+ if (partial_allowed)
+ goto done;
+ else
+ goto err_rollback;
+ }
+
+ if (pool->order) {
+ int j;
+
+ pages[i++] = as_tagged_tag(page_to_phys(p),
+ HUGE_PAGE |
+ HUGE_HEAD);
+ for (j = 1; j < (1u << pool->order); j++) {
+ phys_addr_t phys;
+
+ phys = page_to_phys(p) + PAGE_SIZE * j;
+ pages[i++] = as_tagged_tag(phys,
+ HUGE_PAGE);
+ }
+ } else {
+ pages[i++] = as_tagged(page_to_phys(p));
+ }
+ }
+ }
+
+done:
+ pool_dbg(pool, "alloc_pages(%zu) done\n", i);
+
+ return i;
+
+err_rollback:
+ kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+ return err;
+}
+
+static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
+ size_t nr_pages, struct tagged_addr *pages,
+ bool zero, bool sync)
+{
+ struct page *p;
+ size_t nr_to_pool = 0;
+ LIST_HEAD(new_page_list);
+ size_t i;
+
+ if (!nr_pages)
+ return;
+
+ pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
+ nr_pages, zero, sync);
+
+ /* Zero/sync pages first without holding the pool lock */
+ for (i = 0; i < nr_pages; i++) {
+ if (unlikely(!as_phys_addr_t(pages[i])))
+ continue;
+
+ if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+ p = phys_to_page(as_phys_addr_t(pages[i]));
+ if (zero)
+ kbase_mem_pool_zero_page(pool, p);
+ else if (sync)
+ kbase_mem_pool_sync_page(pool, p);
+
+ list_add(&p->lru, &new_page_list);
+ nr_to_pool++;
+ }
+ pages[i] = as_tagged(0);
+ }
+
+ /* Add new page list to pool */
+ kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
+
+ pool_dbg(pool, "add_array(%zu) added %zu pages\n",
+ nr_pages, nr_to_pool);
+}
+
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ struct tagged_addr *pages, bool dirty, bool reclaimed)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+ struct page *p;
+ size_t nr_to_pool;
+ LIST_HEAD(to_pool_list);
+ size_t i = 0;
+
+ pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
+
+ if (!reclaimed) {
+ /* Add to this pool */
+ nr_to_pool = kbase_mem_pool_capacity(pool);
+ nr_to_pool = min(nr_pages, nr_to_pool);
+
+ kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
+
+ i += nr_to_pool;
+
+ if (i != nr_pages && next_pool) {
+ /* Spill to next pool (may overspill) */
+ nr_to_pool = kbase_mem_pool_capacity(next_pool);
+ nr_to_pool = min(nr_pages - i, nr_to_pool);
+
+ kbase_mem_pool_add_array(next_pool, nr_to_pool,
+ pages + i, true, dirty);
+ i += nr_to_pool;
+ }
+ }
+
+ /* Free any remaining pages to kernel */
+ for (; i < nr_pages; i++) {
+ if (unlikely(!as_phys_addr_t(pages[i])))
+ continue;
+
+ if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+ pages[i] = as_tagged(0);
+ continue;
+ }
+
+ p = phys_to_page(as_phys_addr_t(pages[i]));
+
+ if (reclaimed)
+ zone_page_state_add(-1, page_zone(p),
+ NR_SLAB_RECLAIMABLE);
+
+ kbase_mem_pool_free_page(pool, p);
+ pages[i] = as_tagged(0);
+ }
+
+ pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.c
new file mode 100644
index 000000000000..585fba036c9e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <mali_kbase_mem_pool_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ *val = kbase_mem_pool_size(pool);
+
+ return 0;
+}
+
+static int kbase_mem_pool_debugfs_size_set(void *data, u64 val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ kbase_mem_pool_trim(pool, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops,
+ kbase_mem_pool_debugfs_size_get,
+ kbase_mem_pool_debugfs_size_set,
+ "%llu\n");
+
+static int kbase_mem_pool_debugfs_max_size_get(void *data, u64 *val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ *val = kbase_mem_pool_max_size(pool);
+
+ return 0;
+}
+
+static int kbase_mem_pool_debugfs_max_size_set(void *data, u64 val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ kbase_mem_pool_set_max_size(pool, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_max_size_fops,
+ kbase_mem_pool_debugfs_max_size_get,
+ kbase_mem_pool_debugfs_max_size_set,
+ "%llu\n");
+
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+ struct kbase_mem_pool *pool)
+{
+ debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent,
+ pool, &kbase_mem_pool_debugfs_size_fops);
+
+ debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+ pool, &kbase_mem_pool_debugfs_max_size_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.h
new file mode 100644
index 000000000000..1442854e8956
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_pool_debugfs.h
@@ -0,0 +1,36 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_MEM_POOL_DEBUGFS_H
+#define _KBASE_MEM_POOL_DEBUGFS_H
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_mem_pool_debugfs_init - add debugfs knobs for @pool
+ * @parent: Parent debugfs dentry
+ * @pool: Memory pool to control
+ *
+ * Adds two debugfs files under @parent:
+ * - mem_pool_size: get/set the current size of @pool
+ * - mem_pool_max_size: get/set the max size of @pool
+ */
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+ struct kbase_mem_pool *pool);
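+
+/*
+ * Illustrative usage (a sketch only; the actual call site is outside this
+ * header): a per-context pool could be exposed under that context's debugfs
+ * directory with
+ *
+ *	kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
+ *
+ * after which "mem_pool_size" and "mem_pool_max_size" can be read and
+ * written as decimal page counts under that directory.
+ */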
+
+#endif /*_KBASE_MEM_POOL_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.c
new file mode 100644
index 000000000000..d58fd8d62fde
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.c
@@ -0,0 +1,121 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Show callback for the @c mem_profile debugfs file.
+ *
+ * This function is called to get the contents of the @c mem_profile debugfs
+ * file. This is a report of current memory usage and distribution in userspace.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if it successfully prints data in debugfs entry file, non-zero otherwise
+ */
+static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_context *kctx = sfile->private;
+
+ mutex_lock(&kctx->mem_profile_lock);
+
+ seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
+
+ seq_putc(sfile, '\n');
+
+ mutex_unlock(&kctx->mem_profile_lock);
+
+ return 0;
+}
+
+/*
+ * File operations related to debugfs entry for mem_profile
+ */
+static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbasep_mem_profile_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_mem_profile_debugfs_fops = {
+ .open = kbasep_mem_profile_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size)
+{
+ int err = 0;
+
+ mutex_lock(&kctx->mem_profile_lock);
+
+ dev_dbg(kctx->kbdev->dev, "initialised: %d",
+ kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+ if (!debugfs_create_file("mem_profile", S_IRUGO,
+ kctx->kctx_dentry, kctx,
+ &kbasep_mem_profile_debugfs_fops)) {
+ err = -EAGAIN;
+ } else {
+ kbase_ctx_flag_set(kctx,
+ KCTX_MEM_PROFILE_INITIALIZED);
+ }
+ }
+
+ if (kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+ kfree(kctx->mem_profile_data);
+ kctx->mem_profile_data = data;
+ kctx->mem_profile_size = size;
+ } else {
+ kfree(data);
+ }
+
+ dev_dbg(kctx->kbdev->dev, "returning: %d, initialised: %d",
+ err, kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ mutex_unlock(&kctx->mem_profile_lock);
+
+ return err;
+}
+
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx)
+{
+ mutex_lock(&kctx->mem_profile_lock);
+
+ dev_dbg(kctx->kbdev->dev, "initialised: %d",
+ kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ kfree(kctx->mem_profile_data);
+ kctx->mem_profile_data = NULL;
+ kctx->mem_profile_size = 0;
+
+ mutex_unlock(&kctx->mem_profile_lock);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size)
+{
+ kfree(data);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.h
new file mode 100644
index 000000000000..a1dc2e0b165b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs.h
+ * Header file for mem profiles entries in debugfs
+ *
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H
+#define _KBASE_MEM_PROFILE_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Remove entry from Mali memory profile debugfs
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx);
+
+/**
+ * @brief Insert @p data into the debugfs file so it can be read by userspace
+ *
+ * The function takes ownership of @p data and frees it later when new data
+ * is inserted.
+ *
+ * If the debugfs entry corresponding to the @p kctx doesn't exist,
+ * an attempt will be made to create it.
+ *
+ * @param kctx The context whose debugfs file @p data should be inserted to
+ * @param data A NULL-terminated string to be inserted to the debugfs file,
+ * without the trailing new line character
+ * @param size The length of the @p data string
+ * @return 0 if @p data inserted correctly
+ * -EAGAIN in case of error
+ * @post @ref mem_profile_initialized will be set to @c true
+ * the first time this function succeeds.
+ */
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size);
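+
+/*
+ * Illustrative usage (a minimal sketch of the ownership contract; the report
+ * text is a placeholder): the caller allocates the string and hands ownership
+ * to this function, which frees it when new data is inserted, on removal, or
+ * internally on failure:
+ *
+ *	char *buf = kmalloc(KBASE_MEM_PROFILE_MAX_BUF_SIZE, GFP_KERNEL);
+ *
+ *	if (buf) {
+ *		size_t len = scnprintf(buf, KBASE_MEM_PROFILE_MAX_BUF_SIZE,
+ *				"example report");
+ *		kbasep_mem_profile_debugfs_insert(kctx, buf, len);
+ *	}
+ */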
+
+#endif /*_KBASE_MEM_PROFILE_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs_buf_size.h
new file mode 100644
index 000000000000..82f0702974c2
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs_buf_size.h
+ * Header file for the size of the buffer to accumulate the histogram report text in
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+
+/**
+ * The size of the buffer to accumulate the histogram report text in
+ * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t) (64 + ((80 + (56 * 64)) * 15) + 56))
+
+#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mmu.c b/drivers/gpu/arm_gpu/mali_kbase_mmu.c
new file mode 100644
index 000000000000..b3aa9e032aa8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mmu.c
@@ -0,0 +1,2141 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mmu.c
+ * Base kernel MMU management.
+ */
+
+/* #define DEBUG 1 */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_debug.h>
+
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_time.h>
+#include <mali_kbase_mem.h>
+
+#define KBASE_MMU_PAGE_ENTRIES 512
+
+/**
+ * kbase_mmu_flush_invalidate() - Flush and invalidate the GPU caches.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * Issue a cache flush + invalidate to the GPU caches and invalidate the TLBs.
+ *
+ * If sync is not set then transactions still in flight when the flush is issued
+ * may use the old page tables and the data they write will not be written out
+ * to memory; this function returns after the flush has been issued but
+ * before all accesses which might affect the flushed region have completed.
+ *
+ * If sync is set then accesses in the flushed region will be drained
+ * before data is flushed and invalidated through L1, L2 and into memory,
+ * after which point this function will return.
+ */
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync);
+
+/**
+ * kbase_mmu_sync_pgd - sync page directory to memory
+ * @kbdev: Device pointer.
+ * @handle: Address of DMA region.
+ * @size: Size of the region to sync.
+ *
+ * This should be called after each page directory update.
+ */
+
+static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
+ dma_addr_t handle, size_t size)
+{
+ /* If page table is not coherent then ensure the gpu can read
+ * the pages from memory
+ */
+ if (kbdev->system_coherency != COHERENCY_ACE)
+ dma_sync_single_for_device(kbdev->dev, handle, size,
+ DMA_TO_DEVICE);
+}
+
+/*
+ * Definitions:
+ * - PGD: Page Directory.
+ * - PTE: Page Table Entry. A 64-bit value pointing to the next
+ * level of translation.
+ * - ATE: Address Translation Entry. A 64-bit value pointing to
+ * a 4kB physical page.
+ */
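+
+/*
+ * Worked example of the per-level index extraction used throughout this
+ * file (values chosen for illustration only): each level resolves 9 bits
+ * of the virtual PFN, since a page directory holds 512 entries, so
+ *
+ *	idx = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+ *
+ * For vpfn = 0x40201 this gives index 0 at level 0 and index 1 at
+ * levels 1, 2 and 3.
+ */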
+
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str);
+
+
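+/* Round @minimum up to the next multiple of @multiple, e.g.
+ * make_multiple(5, 4) == 8 while make_multiple(8, 4) == 8.
+ */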
+static size_t make_multiple(size_t minimum, size_t multiple)
+{
+ size_t remainder = minimum % multiple;
+
+ if (remainder == 0)
+ return minimum;
+
+ return minimum + multiple - remainder;
+}
+
+void page_fault_worker(struct work_struct *data)
+{
+ u64 fault_pfn;
+ u32 fault_status;
+ size_t new_pages;
+ size_t fault_rel_pfn;
+ struct kbase_as *faulting_as;
+ int as_no;
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+ struct kbase_va_region *region;
+ int err;
+ bool grown = false;
+
+ faulting_as = container_of(data, struct kbase_as, work_pagefault);
+ fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
+ as_no = faulting_as->number;
+
+ kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+ /* Grab the context that was already refcounted in kbase_mmu_interrupt().
+ * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+ if (WARN_ON(!kctx)) {
+ atomic_dec(&kbdev->faults_pending);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
+
+ if (unlikely(faulting_as->protected_mode)) {
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Protected mode fault");
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+
+ goto fault_done;
+ }
+
+ fault_status = faulting_as->fault_status;
+ switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
+ /* need to check against the region to handle this one */
+ break;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Permission failure");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Translation table bus fault");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
+ /* nothing to do, but we don't expect this fault currently */
+ dev_warn(kbdev->dev, "Access flag unexpectedly set");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Address size fault");
+ else
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Unknown fault code");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory attributes fault");
+ else
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Unknown fault code");
+ goto fault_done;
+
+ default:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Unknown fault code");
+ goto fault_done;
+ }
+
+ /* so we have a translation fault, let's see if it is for growable
+ * memory */
+ kbase_gpu_vm_lock(kctx);
+
+ region = kbase_region_tracker_find_region_enclosing_address(kctx,
+ faulting_as->fault_addr);
+ if (!region || region->flags & KBASE_REG_FREE) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not mapped on the GPU");
+ goto fault_done;
+ }
+
+ if (region->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "DMA-BUF is not mapped on the GPU");
+ goto fault_done;
+ }
+
+ if ((region->flags & GROWABLE_FLAGS_REQUIRED)
+ != GROWABLE_FLAGS_REQUIRED) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not growable");
+ goto fault_done;
+ }
+
+ if ((region->flags & KBASE_REG_DONT_NEED)) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Don't need memory can't be grown");
+ goto fault_done;
+ }
+
+ /* find the size we need to grow it by */
+ /* we know the result fits in a size_t because kbase_region_tracker_find_region_enclosing_address
+ * validates that the fault_address is within a size_t of the start_pfn */
+ fault_rel_pfn = fault_pfn - region->start_pfn;
+
+ if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
+ dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+ faulting_as->fault_addr, region->start_pfn,
+ region->start_pfn +
+ kbase_reg_current_backed_size(region));
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* [1] In case another page fault occurred while we were
+ * handling the (duplicate) page fault, we need to ensure we
+ * don't lose the other page fault as a result of clearing
+ * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
+ * an UNLOCK command that will retry any stalled memory
+ * transaction (which should cause the other page fault to be
+ * raised again).
+ */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+
+ goto fault_done;
+ }
+
+ new_pages = make_multiple(fault_rel_pfn -
+ kbase_reg_current_backed_size(region) + 1,
+ region->extent);
+
+ /* cap to max vsize */
+ if (new_pages + kbase_reg_current_backed_size(region) >
+ region->nr_pages)
+ new_pages = region->nr_pages -
+ kbase_reg_current_backed_size(region);
+
+ if (0 == new_pages) {
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* Duplicate of a fault we've already handled, nothing to do */
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* See comment [1] about UNLOCK usage */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+ goto fault_done;
+ }
+
+ if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
+ if (region->gpu_alloc != region->cpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ region->cpu_alloc, new_pages) == 0) {
+ grown = true;
+ } else {
+ kbase_free_phy_pages_helper(region->gpu_alloc,
+ new_pages);
+ }
+ } else {
+ grown = true;
+ }
+ }
+
+
+ if (grown) {
+ u64 pfn_offset;
+ u32 op;
+
+ /* alloc success */
+ KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
+
+ /* set up the new pages */
+ pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
+ /*
+ * Note:
+ * Issuing an MMU operation will unlock the MMU and cause the
+ * translation to be replayed. If the page insertion fails then,
+ * rather than trying to continue, the context should be killed,
+ * so the no_flush version of insert_pages is used, which allows
+ * us to unlock the MMU as we see fit.
+ */
+ err = kbase_mmu_insert_pages_no_flush(kctx,
+ region->start_pfn + pfn_offset,
+ &kbase_get_gpu_phy_pages(region)[pfn_offset],
+ new_pages, region->flags);
+ if (err) {
+ kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
+ if (region->gpu_alloc != region->cpu_alloc)
+ kbase_free_phy_pages_helper(region->cpu_alloc,
+ new_pages);
+ kbase_gpu_vm_unlock(kctx);
+ /* The locked VA region will be unlocked and the cache invalidated in here */
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page table update failure");
+ goto fault_done;
+ }
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
+#endif
+ KBASE_TLSTREAM_AUX_PAGEFAULT(kctx->id, (u64)new_pages);
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* flush L2 and unlock the VA (resumes the MMU) */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+ op = AS_COMMAND_FLUSH;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ /* clear MMU interrupt - this needs to be done after updating
+ * the page tables but before issuing a FLUSH command. The
+ * FLUSH cmd has a side effect that it restarts stalled memory
+ * transactions in other address spaces which may cause
+ * another fault to occur. If we didn't clear the interrupt at
+ * this stage a new IRQ might not be raised when the GPU finds
+ * a MMU IRQ is already pending.
+ */
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
+ faulting_as->fault_addr >> PAGE_SHIFT,
+ new_pages,
+ op, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ /* reenable this in the mask */
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+ } else {
+ /* failed to extend, handle as a normal PF */
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page allocation failure");
+ }
+
+fault_done:
+ /*
+ * By this point, the fault was handled in some way,
+ * so release the ctx refcount
+ */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ atomic_dec(&kbdev->faults_pending);
+}
+
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx)
+{
+ u64 *page;
+ int i;
+ struct page *p;
+ int new_page_count __maybe_unused;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ new_page_count = kbase_atomic_add_pages(1, &kctx->used_pages);
+ kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages);
+
+ p = kbase_mem_pool_alloc(&kctx->mem_pool);
+ if (!p)
+ goto sub_pages;
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+
+ page = kmap(p);
+ if (NULL == page)
+ goto alloc_free;
+
+ kbase_process_page_usage_inc(kctx, 1);
+
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
+ kctx->kbdev->mmu_mode->entry_invalidate(&page[i]);
+
+ kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+
+ kunmap(p);
+ return page_to_phys(p);
+
+alloc_free:
+ kbase_mem_pool_free(&kctx->mem_pool, p, false);
+sub_pages:
+ kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
+
+/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
+ * new table from the pool if needed and possible
+ */
+static int mmu_get_next_pgd(struct kbase_context *kctx,
+ phys_addr_t *pgd, u64 vpfn, int level)
+{
+ u64 *page;
+ phys_addr_t target_pgd;
+ struct page *p;
+
+ KBASE_DEBUG_ASSERT(*pgd);
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ /*
+ * Architecture spec defines level-0 as being the top-most.
+ * This is a bit unfortunate here, but we keep the same convention.
+ */
+ vpfn >>= (3 - level) * 9;
+ vpfn &= 0x1FF;
+
+ p = pfn_to_page(PFN_DOWN(*pgd));
+ page = kmap(p);
+ if (NULL == page) {
+ dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
+ return -EINVAL;
+ }
+
+ target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+
+ if (!target_pgd) {
+ target_pgd = kbase_mmu_alloc_pgd(kctx);
+ if (!target_pgd) {
+ dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
+ kunmap(p);
+ return -ENOMEM;
+ }
+
+ kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
+
+ kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+ /* Rely on the caller to update the address space flags. */
+ }
+
+ kunmap(p);
+ *pgd = target_pgd;
+
+ return 0;
+}
+
+/*
+ * Returns the PGD for the specified level of translation
+ */
+static int mmu_get_pgd_at_level(struct kbase_context *kctx,
+ u64 vpfn,
+ unsigned int level,
+ phys_addr_t *out_pgd)
+{
+ phys_addr_t pgd;
+ int l;
+
+ lockdep_assert_held(&kctx->mmu_lock);
+ pgd = kctx->pgd;
+
+ for (l = MIDGARD_MMU_TOPLEVEL; l < level; l++) {
+ int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);
+ /* Handle failure condition */
+ if (err) {
+ dev_dbg(kctx->kbdev->dev,
+ "%s: mmu_get_next_pgd failure at level %d\n",
+ __func__, l);
+ return err;
+ }
+ }
+
+ *out_pgd = pgd;
+
+ return 0;
+}
+
+#define mmu_get_bottom_pgd(kctx, vpfn, out_pgd) \
+ mmu_get_pgd_at_level((kctx), (vpfn), MIDGARD_MMU_BOTTOMLEVEL, (out_pgd))
+
+
+static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx,
+ u64 from_vpfn, u64 to_vpfn)
+{
+ phys_addr_t pgd;
+ u64 vpfn = from_vpfn;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+ KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn);
+
+ lockdep_assert_held(&kctx->mmu_lock);
+ lockdep_assert_held(&kctx->reg_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ while (vpfn < to_vpfn) {
+ unsigned int i;
+ unsigned int idx = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - idx;
+ unsigned int pcount = 0;
+ unsigned int left = to_vpfn - vpfn;
+ unsigned int level;
+ u64 *page;
+
+ if (count > left)
+ count = left;
+
+ /* need to check if this is a 2MB page or a 4kB one */
+ pgd = kctx->pgd;
+
+ for (level = MIDGARD_MMU_TOPLEVEL;
+ level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+ idx = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+ page = kmap(phys_to_page(pgd));
+ if (mmu_mode->ate_is_valid(page[idx], level))
+ break; /* keep the mapping */
+ kunmap(phys_to_page(pgd));
+ pgd = mmu_mode->pte_to_phy_addr(page[idx]);
+ }
+
+ switch (level) {
+ case MIDGARD_MMU_LEVEL(2):
+ /* remap to single entry to update */
+ pcount = 1;
+ break;
+ case MIDGARD_MMU_BOTTOMLEVEL:
+ /* page count is the same as the logical count */
+ pcount = count;
+ break;
+ default:
+ dev_warn(kctx->kbdev->dev, "%sNo support for ATEs at level %d\n",
+ __func__, level);
+ goto next;
+ }
+
+ /* Invalidate the entries we added */
+ for (i = 0; i < pcount; i++)
+ mmu_mode->entry_invalidate(&page[idx + i]);
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(phys_to_page(pgd)) + 8 * idx,
+ 8 * pcount);
+ kunmap(phys_to_page(pgd));
+
+next:
+ vpfn += count;
+ }
+}
+
+/*
+ * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr phys, size_t nr,
+ unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ /* In case the insert_single_page only partially completes we need to be
+ * able to recover */
+ bool recover_required = false;
+ u64 recover_vpfn = vpfn;
+ size_t recover_count = 0;
+ size_t remain = nr;
+ int err;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ while (remain) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > remain)
+ count = remain;
+
+ /*
+ * Repeatedly calling mmu_get_bottom_pgd() is clearly
+ * suboptimal. We don't have to re-parse the whole tree
+ * each time (just cache the l0-l2 sequence).
+ * On the other hand, it's only a gain when we map more than
+ * 256 pages at once (on average). Do we really care?
+ */
+ do {
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+ if (err) {
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_vpfn +
+ recover_count
+ );
+ }
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_vpfn +
+ recover_count
+ );
+ }
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++) {
+ unsigned int ofs = index + i;
+
+ /* Fail if the current page is a valid ATE entry */
+ KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+
+ mmu_mode->entry_set_ate(&pgd_page[ofs],
+ phys, flags,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ }
+
+ vpfn += count;
+ remain -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(p);
+ /* We have started modifying the page table.
+ * If further pages need inserting and fail we need to undo what
+ * has already taken place */
+ recover_required = true;
+ recover_count += count;
+ }
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return err;
+}
+
+static inline void cleanup_empty_pte(struct kbase_context *kctx, u64 *pte)
+{
+ phys_addr_t tmp_pgd;
+ struct page *tmp_p;
+
+ tmp_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(*pte);
+ tmp_p = phys_to_page(tmp_pgd);
+ kbase_mem_pool_free(&kctx->mem_pool, tmp_p, false);
+ kbase_process_page_usage_dec(kctx, 1);
+ kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+}
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
+ const u64 start_vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ u64 insert_vpfn = start_vpfn;
+ size_t remain = nr;
+ int err;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(start_vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(start_vpfn <= (U64_MAX / PAGE_SIZE));
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ while (remain) {
+ unsigned int i;
+ unsigned int vindex = insert_vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - vindex;
+ struct page *p;
+ unsigned int cur_level;
+
+ if (count > remain)
+ count = remain;
+
+ if (!vindex && is_huge_head(*phys))
+ cur_level = MIDGARD_MMU_LEVEL(2);
+ else
+ cur_level = MIDGARD_MMU_BOTTOMLEVEL;
+
+ /*
+ * Repeatedly calling mmu_get_pgd_at_level() is clearly
+ * suboptimal. We don't have to re-parse the whole tree
+ * each time (just cache the l0-l2 sequence).
+ * On the other hand, it's only a gain when we map more than
+ * 256 pages at once (on average). Do we really care?
+ */
+ do {
+ err = mmu_get_pgd_at_level(kctx, insert_vpfn, cur_level,
+ &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ cur_level);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+
+ if (err) {
+ dev_warn(kctx->kbdev->dev,
+ "%s: mmu_get_bottom_pgd failure\n", __func__);
+ if (insert_vpfn != start_vpfn) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ start_vpfn,
+ insert_vpfn);
+ }
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "%s: kmap failure\n",
+ __func__);
+ if (insert_vpfn != start_vpfn) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ start_vpfn,
+ insert_vpfn);
+ }
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ if (cur_level == MIDGARD_MMU_LEVEL(2)) {
+ unsigned int level_index = (insert_vpfn >> 9) & 0x1FF;
+ u64 *target = &pgd_page[level_index];
+
+ if (mmu_mode->pte_is_valid(*target))
+ cleanup_empty_pte(kctx, target);
+ mmu_mode->entry_set_ate(target, *phys, flags,
+ cur_level);
+ } else {
+ for (i = 0; i < count; i++) {
+ unsigned int ofs = vindex + i;
+ u64 *target = &pgd_page[ofs];
+
+ /* Fail if the current page is a valid ATE entry
+ */
+ KBASE_DEBUG_ASSERT(0 == (*target & 1UL));
+
+ kctx->kbdev->mmu_mode->entry_set_ate(target,
+ phys[i], flags, cur_level);
+ }
+ }
+
+ phys += count;
+ insert_vpfn += count;
+ remain -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (vindex * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(p);
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ return err;
+}
+
+/*
+ * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags)
+{
+ int err;
+
+ err = kbase_mmu_insert_pages_no_flush(kctx, vpfn, phys, nr, flags);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
+
+/**
+ * kbase_mmu_flush_invalidate_noretain() - Flush and invalidate the GPU caches
+ * without retaining the kbase context.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * As per kbase_mmu_flush_invalidate but doesn't retain the kctx or do any
+ * other locking.
+ */
+static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ int err;
+ u32 op;
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return;
+
+ if (sync)
+ op = AS_COMMAND_FLUSH_MEM;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ err = kbase_mmu_hw_do_operation(kbdev,
+ &kbdev->as[kctx->as_nr],
+ kctx, vpfn, nr, op, 0);
+#if KBASE_GPU_RESET_EN
+ if (err) {
+ /* Flush failed to complete, assume the
+ * GPU has hung and perform a reset to
+ * recover */
+ dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev))
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+#ifndef CONFIG_MALI_NO_MALI
+ /*
+ * As this function could be called in interrupt context the sync
+ * request can't block. Instead log the request and the next flush
+ * request will pick it up.
+ */
+ if ((!err) && sync &&
+ kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367))
+ atomic_set(&kctx->drain_pending, 1);
+#endif /* !CONFIG_MALI_NO_MALI */
+}
+
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync)
+{
+ struct kbase_device *kbdev;
+ bool ctx_is_in_runpool;
+#ifndef CONFIG_MALI_NO_MALI
+ bool drain_pending = false;
+
+ if (atomic_xchg(&kctx->drain_pending, 0))
+ drain_pending = true;
+#endif /* !CONFIG_MALI_NO_MALI */
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return;
+
+ kbdev = kctx->kbdev;
+ mutex_lock(&kbdev->js_data.queue_mutex);
+ ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx);
+ mutex_unlock(&kbdev->js_data.queue_mutex);
+
+ if (ctx_is_in_runpool) {
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ if (!kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ int err;
+ u32 op;
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ if (sync)
+ op = AS_COMMAND_FLUSH_MEM;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ err = kbase_mmu_hw_do_operation(kbdev,
+ &kbdev->as[kctx->as_nr],
+ kctx, vpfn, nr, op, 0);
+
+#if KBASE_GPU_RESET_EN
+ if (err) {
+ /* Flush failed to complete, assume the
+ * GPU has hung and perform a reset to
+ * recover */
+ dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issueing GPU soft-reset to recover\n");
+
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+#ifndef CONFIG_MALI_NO_MALI
+ /*
+ * The transaction lock must be dropped before here
+ * as kbase_wait_write_flush could take it if
+ * the GPU was powered down (static analysis doesn't
+ * know this can't happen).
+ */
+ drain_pending |= (!err) && sync &&
+ kbase_hw_has_issue(kctx->kbdev,
+ BASE_HW_ISSUE_6367);
+ if (drain_pending) {
+ /* Wait for GPU to flush write buffer */
+ kbase_wait_write_flush(kctx);
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+
+ kbase_pm_context_idle(kbdev);
+ }
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+}
+
+void kbase_mmu_update(struct kbase_context *kctx)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+ lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the hwaccess_lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ kctx->kbdev->mmu_mode->update(kctx);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_update);
+
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ kbdev->mmu_mode->disable_as(kbdev, as_nr);
+}
+
+void kbase_mmu_disable(struct kbase_context *kctx)
+{
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the hwaccess_lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ /*
+ * The address space is being disabled, drain all knowledge of it out
+ * from the caches as pages and page tables might be freed after this.
+ *
+ * The job scheduler code will already be holding the locks and context
+ * so just do the flush.
+ */
+ kbase_mmu_flush_invalidate_noretain(kctx, 0, ~0, true);
+
+ kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_disable);
+
+/*
+ * We actually only discard the ATE, and not the page table
+ * pages. There is a potential DoS here, as we'll leak memory by
+ * having PTEs that are potentially unused. Will require physical
+ * page accounting, so MMU pages are part of the process allocation.
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
+{
+ phys_addr_t pgd;
+ size_t requested_nr = nr;
+ struct kbase_mmu_mode const *mmu_mode;
+ int err = -EFAULT;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr);
+
+ if (0 == nr) {
+ /* early out if nothing to do */
+ return 0;
+ }
+
+ mutex_lock(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ while (nr) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ unsigned int pcount;
+ unsigned int level;
+ u64 *page;
+
+ if (count > nr)
+ count = nr;
+
+ /* need to check if this is a 2MB or a 4kB page */
+ pgd = kctx->pgd;
+
+ for (level = MIDGARD_MMU_TOPLEVEL;
+ level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+ phys_addr_t next_pgd;
+
+ index = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+ page = kmap(phys_to_page(pgd));
+ if (mmu_mode->ate_is_valid(page[index], level))
+ break; /* keep the mapping */
+ else if (!mmu_mode->pte_is_valid(page[index])) {
+ /* nothing here, advance */
+ switch (level) {
+ case MIDGARD_MMU_LEVEL(0):
+ /* 512^3 4kB pages spanned by a level 0 entry */
+ count = 134217728;
+ break;
+ case MIDGARD_MMU_LEVEL(1):
+ /* 512^2 4kB pages spanned by a level 1 entry */
+ count = 262144;
+ break;
+ case MIDGARD_MMU_LEVEL(2):
+ /* 512 4kB pages spanned by a level 2 entry */
+ count = 512;
+ break;
+ case MIDGARD_MMU_LEVEL(3):
+ /* a level 3 (bottom) entry maps a single 4kB page */
+ count = 1;
+ break;
+ }
+ if (count > nr)
+ count = nr;
+ goto next;
+ }
+ next_pgd = mmu_mode->pte_to_phy_addr(page[index]);
+ kunmap(phys_to_page(pgd));
+ pgd = next_pgd;
+ }
+
+ switch (level) {
+ case MIDGARD_MMU_LEVEL(0):
+ case MIDGARD_MMU_LEVEL(1):
+ dev_warn(kctx->kbdev->dev,
+ "%s: No support for ATEs at level %d\n",
+ __func__, level);
+ kunmap(phys_to_page(pgd));
+ goto out;
+ case MIDGARD_MMU_LEVEL(2):
+ /* can only teardown if count >= 512 */
+ if (count >= 512) {
+ pcount = 1;
+ } else {
+ dev_warn(kctx->kbdev->dev,
+ "%s: limiting teardown as it tries to do a partial 2MB teardown, need 512, but have %d to tear down\n",
+ __func__, count);
+ pcount = 0;
+ }
+ break;
+ case MIDGARD_MMU_BOTTOMLEVEL:
+ /* page count is the same as the logical count */
+ pcount = count;
+ break;
+ default:
+ dev_err(kctx->kbdev->dev,
+ "%s: found non-mapped memory, early out\n",
+ __func__);
+ vpfn += count;
+ nr -= count;
+ continue;
+ }
+
+ /* Invalidate the entries we added */
+ for (i = 0; i < pcount; i++)
+ mmu_mode->entry_invalidate(&page[index + i]);
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(phys_to_page(pgd)) +
+ 8 * index, 8*pcount);
+
+next:
+ kunmap(phys_to_page(pgd));
+ vpfn += count;
+ nr -= count;
+ }
+ err = 0;
+out:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
+
+/**
+ * Update the entries for the specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'.
+ * This call is triggered in response to a change of the memory attributes.
+ *
+ * @pre : The caller is responsible for validating the memory attributes
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ size_t requested_nr = nr;
+ struct kbase_mmu_mode const *mmu_mode;
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages(): updating page share flags on GPU PFN 0x%llx from phys %p, %zu pages",
+ vpfn, phys, nr);
+
+ while (nr) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ size_t count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > nr)
+ count = nr;
+
+ do {
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+ if (err) {
+ dev_warn(kctx->kbdev->dev,
+ "mmu_get_bottom_pgd failure\n");
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kmap failure\n");
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++)
+ mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i],
+ flags, MIDGARD_MMU_BOTTOMLEVEL);
+
+ phys += count;
+ vpfn += count;
+ nr -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return err;
+}
+
+static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd,
+ int level, u64 *pgd_page_buffer)
+{
+ phys_addr_t target_pgd;
+ struct page *p;
+ u64 *pgd_page;
+ int i;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->mmu_lock);
+ lockdep_assert_held(&kctx->reg_lock);
+
+ pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+ /* kmap_atomic should NEVER fail. */
+ KBASE_DEBUG_ASSERT(NULL != pgd_page);
+ /* Copy the page to our preallocated buffer so that we can minimize
+ * kmap_atomic usage */
+ memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+ kunmap_atomic(pgd_page);
+ pgd_page = pgd_page_buffer;
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+ target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
+
+ if (target_pgd) {
+ if (mmu_mode->pte_is_valid(pgd_page[i])) {
+ mmu_teardown_level(kctx,
+ target_pgd,
+ level + 1,
+ pgd_page_buffer +
+ (PAGE_SIZE / sizeof(u64)));
+ }
+ }
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ kbase_mem_pool_free(&kctx->mem_pool, p, true);
+ kbase_process_page_usage_dec(kctx, 1);
+ kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+}
+
+int kbase_mmu_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages);
+
+ mutex_init(&kctx->mmu_lock);
+
+ /* Preallocate MMU depth of four pages for mmu_teardown_level to use */
+ kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+
+ if (NULL == kctx->mmu_teardown_pages)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void kbase_mmu_term(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
+
+ kfree(kctx->mmu_teardown_pages);
+ kctx->mmu_teardown_pages = NULL;
+}
+
+void kbase_mmu_free_pgd(struct kbase_context *kctx)
+{
+ int new_page_count = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
+
+ mutex_lock(&kctx->mmu_lock);
+ mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL,
+ kctx->mmu_teardown_pages);
+ mutex_unlock(&kctx->mmu_lock);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd);
+
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
+{
+ phys_addr_t target_pgd;
+ u64 *pgd_page;
+ int i;
+ size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64);
+ size_t dump_size;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n");
+ return 0;
+ }
+
+ if (*size_left >= size) {
+ /* A modified physical address that contains the page table level */
+ u64 m_pgd = pgd | level;
+
+ /* Put the modified physical address in the output buffer */
+ memcpy(*buffer, &m_pgd, sizeof(m_pgd));
+ *buffer += sizeof(m_pgd);
+
+ /* Followed by the page table itself */
+ memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES);
+ *buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES;
+
+ *size_left -= size;
+ }
+
+ if (level < MIDGARD_MMU_BOTTOMLEVEL) {
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+ if (mmu_mode->pte_is_valid(pgd_page[i])) {
+ target_pgd = mmu_mode->pte_to_phy_addr(
+ pgd_page[i]);
+
+ dump_size = kbasep_mmu_dump_level(kctx,
+ target_pgd, level + 1,
+ buffer, size_left);
+ if (!dump_size) {
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+ return 0;
+ }
+ size += dump_size;
+ }
+ }
+ }
+
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+
+ return size;
+}
+
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
+{
+ void *kaddr;
+ size_t size_left;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ if (0 == nr_pages) {
+ /* can't dump in a 0 sized buffer, early out */
+ return NULL;
+ }
+
+ size_left = nr_pages * PAGE_SIZE;
+
+ KBASE_DEBUG_ASSERT(0 != size_left);
+ kaddr = vmalloc_user(size_left);
+
+ mutex_lock(&kctx->mmu_lock);
+
+ if (kaddr) {
+ u64 end_marker = 0xFFULL;
+ char *buffer;
+ char *mmu_dump_buffer;
+ u64 config[3];
+ size_t size;
+
+ buffer = (char *)kaddr;
+ mmu_dump_buffer = buffer;
+
+ if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
+ struct kbase_mmu_setup as_setup;
+
+ kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup);
+ config[0] = as_setup.transtab;
+ config[1] = as_setup.memattr;
+ config[2] = as_setup.transcfg;
+ memcpy(buffer, &config, sizeof(config));
+ mmu_dump_buffer += sizeof(config);
+ size_left -= sizeof(config);
+ }
+
+
+
+ size = kbasep_mmu_dump_level(kctx,
+ kctx->pgd,
+ MIDGARD_MMU_TOPLEVEL,
+ &mmu_dump_buffer,
+ &size_left);
+
+ if (!size)
+ goto fail_free;
+
+ /* Add on the size for the end marker */
+ size += sizeof(u64);
+ /* Add on the size for the config */
+ if (kctx->api_version >= KBASE_API_VERSION(8, 4))
+ size += sizeof(config);
+
+
+ if (size > nr_pages * PAGE_SIZE || size_left < sizeof(u64)) {
+ /* The buffer isn't big enough - free the memory and return failure */
+ goto fail_free;
+ }
+
+ /* Add the end marker */
+ memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ return kaddr;
+
+fail_free:
+ vfree(kaddr);
+ mutex_unlock(&kctx->mmu_lock);
+ return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_dump);
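+
+/*
+ * Illustrative sketch (derived from the code above, not a formal specification)
+ * of the buffer layout written by kbase_mmu_dump() and kbasep_mmu_dump_level():
+ *
+ *   [ u64 transtab ][ u64 memattr ][ u64 transcfg ]    only if api_version >= 8.4
+ *   then, depth-first from MIDGARD_MMU_TOPLEVEL, for each visited table:
+ *     [ u64 m_pgd = pgd | level ][ KBASE_MMU_PAGE_ENTRIES x u64 entries ]
+ *   [ u64 end_marker = 0xFF ]
+ *
+ * Each table therefore contributes (KBASE_MMU_PAGE_ENTRIES + 1) * sizeof(u64)
+ * bytes, matching the 'size' accounting in kbasep_mmu_dump_level().
+ */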
+
+void bus_fault_worker(struct work_struct *data)
+{
+ struct kbase_as *faulting_as;
+ int as_no;
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+#if KBASE_GPU_RESET_EN
+ bool reset_status = false;
+#endif /* KBASE_GPU_RESET_EN */
+
+ faulting_as = container_of(data, struct kbase_as, work_busfault);
+
+ as_no = faulting_as->number;
+
+ kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+ /* Grab the context that was already refcounted in kbase_mmu_interrupt().
+ * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+ if (WARN_ON(!kctx)) {
+ atomic_dec(&kbdev->faults_pending);
+ return;
+ }
+
+ if (unlikely(faulting_as->protected_mode))
+ {
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Permission failure");
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ atomic_dec(&kbdev->faults_pending);
+ return;
+
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+ * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+ * are evicted from the GPU before the switch.
+ */
+ dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+ reset_status = kbase_prepare_to_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
+ if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ unsigned long flags;
+
+ /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* Set the MMU into unmapped mode */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+
+ kbase_pm_context_idle(kbdev);
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ atomic_dec(&kbdev->faults_pending);
+}
+
+const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
+{
+ const char *e;
+
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00:
+ e = "NOT_STARTED/IDLE/OK";
+ break;
+ case 0x01:
+ e = "DONE";
+ break;
+ case 0x02:
+ e = "INTERRUPTED";
+ break;
+ case 0x03:
+ e = "STOPPED";
+ break;
+ case 0x04:
+ e = "TERMINATED";
+ break;
+ case 0x08:
+ e = "ACTIVE";
+ break;
+ /* Job exceptions */
+ case 0x40:
+ e = "JOB_CONFIG_FAULT";
+ break;
+ case 0x41:
+ e = "JOB_POWER_FAULT";
+ break;
+ case 0x42:
+ e = "JOB_READ_FAULT";
+ break;
+ case 0x43:
+ e = "JOB_WRITE_FAULT";
+ break;
+ case 0x44:
+ e = "JOB_AFFINITY_FAULT";
+ break;
+ case 0x48:
+ e = "JOB_BUS_FAULT";
+ break;
+ case 0x50:
+ e = "INSTR_INVALID_PC";
+ break;
+ case 0x51:
+ e = "INSTR_INVALID_ENC";
+ break;
+ case 0x52:
+ e = "INSTR_TYPE_MISMATCH";
+ break;
+ case 0x53:
+ e = "INSTR_OPERAND_FAULT";
+ break;
+ case 0x54:
+ e = "INSTR_TLS_FAULT";
+ break;
+ case 0x55:
+ e = "INSTR_BARRIER_FAULT";
+ break;
+ case 0x56:
+ e = "INSTR_ALIGN_FAULT";
+ break;
+ case 0x58:
+ e = "DATA_INVALID_FAULT";
+ break;
+ case 0x59:
+ e = "TILE_RANGE_FAULT";
+ break;
+ case 0x5A:
+ e = "ADDR_RANGE_FAULT";
+ break;
+ case 0x60:
+ e = "OUT_OF_MEMORY";
+ break;
+ /* GPU exceptions */
+ case 0x80:
+ e = "DELAYED_BUS_FAULT";
+ break;
+ case 0x88:
+ e = "SHAREABILITY_FAULT";
+ break;
+ /* MMU exceptions */
+ case 0xC0:
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC4:
+ case 0xC5:
+ case 0xC6:
+ case 0xC7:
+ e = "TRANSLATION_FAULT";
+ break;
+ case 0xC8:
+ e = "PERMISSION_FAULT";
+ break;
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ e = "PERMISSION_FAULT";
+ else
+ e = "UNKNOWN";
+ break;
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD5:
+ case 0xD6:
+ case 0xD7:
+ e = "TRANSTAB_BUS_FAULT";
+ break;
+ case 0xD8:
+ e = "ACCESS_FLAG";
+ break;
+ case 0xD9:
+ case 0xDA:
+ case 0xDB:
+ case 0xDC:
+ case 0xDD:
+ case 0xDE:
+ case 0xDF:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ e = "ACCESS_FLAG";
+ else
+ e = "UNKNOWN";
+ break;
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ e = "ADDRESS_SIZE_FAULT";
+ else
+ e = "UNKNOWN";
+ break;
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xED:
+ case 0xEE:
+ case 0xEF:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ e = "MEMORY_ATTRIBUTES_FAULT";
+ else
+ e = "UNKNOWN";
+ break;
+ default:
+ e = "UNKNOWN";
+ break;
+ }
+
+ return e;
+}
+
+static const char *access_type_name(struct kbase_device *kbdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ return "ATOMIC";
+ else
+ return "UNKNOWN";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+/**
+ * The caller must ensure it has retained the ctx to prevent it from being scheduled out whilst it is being worked on.
+ */
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str)
+{
+ unsigned long flags;
+ int exception_type;
+ int access_type;
+ int source_id;
+ int as_no;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+
+#if KBASE_GPU_RESET_EN
+ bool reset_status = false;
+#endif
+
+ as_no = as->number;
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+
+ /* ASSERT that the context won't leave the runpool */
+ KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+ /* decode the fault status */
+ exception_type = as->fault_status & 0xFF;
+ access_type = (as->fault_status >> 8) & 0x3;
+ source_id = (as->fault_status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n"
+ "pid: %d\n",
+ as_no, as->fault_addr,
+ reason_str,
+ as->fault_status,
+ (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, kbase_exception_name(kbdev, exception_type),
+ access_type, access_type_name(kbdev, as->fault_status),
+ source_id,
+ kctx->pid);
+
+ /* hardware counters dump fault handling */
+ if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING)) {
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+
+ if ((as->fault_addr >= kbdev->hwcnt.addr) &&
+ (as->fault_addr < (kbdev->hwcnt.addr +
+ (num_core_groups * 2048))))
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+ }
+
+ /* Stop the kctx from submitting more jobs and cause it to be scheduled
+ * out/rescheduled - this will occur on releasing the context's refcount */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
+ * context can appear in the job slots from this point on */
+ kbase_backend_jm_kill_jobs_from_kctx(kctx);
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+ * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+ * are evicted from the GPU before the switch.
+ */
+ dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
+ reset_status = kbase_prepare_to_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+ /* Clear down the fault */
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+}
+
+void kbasep_as_do_poke(struct work_struct *work)
+{
+ struct kbase_as *as;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(work);
+ as = container_of(work, struct kbase_as, poke_work);
+ kbdev = container_of(as, struct kbase_device, as[as->number]);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ /* GPU power will already be active by virtue of the caller holding a JS
+ * reference on the address space, and will not release it until this worker
+ * has finished */
+
+ /* Further to the comment above, we know that while this function is running
+ * the AS will not be released as before the atom is released this workqueue
+ * is flushed (in kbase_as_poking_timer_release_atom)
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ /* Force a uTLB invalidate */
+ kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
+ AS_COMMAND_UNLOCK, 0);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (as->poke_refcount &&
+ !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
+ /* Only queue up the timer if we need it, and we're not trying to kill it */
+ hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
+{
+ struct kbase_as *as;
+ int queue_work_ret;
+
+ KBASE_DEBUG_ASSERT(NULL != timer);
+ as = container_of(timer, struct kbase_as, poke_timer);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+ KBASE_DEBUG_ASSERT(queue_work_ret);
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * Retain the poking timer on an atom's context (if the atom hasn't already
+ * done so), and start the timer (if it's not already started).
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that's running on the GPU.
+ *
+ * The caller must hold hwaccess_lock
+ *
+ * This can be called safely from atomic context
+ */
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct kbase_as *as;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (katom->poking)
+ return;
+
+ katom->poking = 1;
+
+ /* It's safe to work on the as/as_nr without an explicit reference,
+ * because the caller holds the hwaccess_lock, and the atom itself
+ * was also running and had already taken a reference */
+ as = &kbdev->as[kctx->as_nr];
+
+ if (++(as->poke_refcount) == 1) {
+ /* First refcount for poke needed: check if not already in flight */
+ if (!as->poke_state) {
+ /* need to start poking */
+ as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT;
+ queue_work(as->poke_wq, &as->poke_work);
+ }
+ }
+}
+
+/**
+ * If an atom holds a poking timer, release it and wait for it to finish
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that still has a JS reference on the context
+ *
+ * This must \b not be called from atomic context, since it can sleep.
+ */
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct kbase_as *as;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ if (!katom->poking)
+ return;
+
+ as = &kbdev->as[kctx->as_nr];
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ if (--(as->poke_refcount) == 0) {
+ as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ hrtimer_cancel(&as->poke_timer);
+ flush_workqueue(as->poke_wq);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Re-check whether it's still needed */
+ if (as->poke_refcount) {
+ int queue_work_ret;
+ /* Poking still needed:
+ * - Another retain will not be starting the timer or queueing work,
+ * because it's still marked as in-flight
+ * - The hrtimer has finished, and has not started a new timer or
+ * queued work because it's been marked as killing
+ *
+ * So whatever happens now, just queue the work again */
+ as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE);
+ queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+ KBASE_DEBUG_ASSERT(queue_work_ret);
+ } else {
+ /* It isn't - so mark it as not in flight, and not killing */
+ as->poke_state = 0u;
+
+ /* The poke associated with the atom has now finished. If this is
+ * also the last atom on the context, then we can guarantee no more
+ * pokes (and thus no more poking register accesses) will occur on
+ * the context until new atoms are run */
+ }
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ katom->poking = 0;
+}
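+
+/*
+ * Illustrative pairing of the two helpers above (a minimal sketch of an assumed
+ * caller): an atom that needs the poking workaround retains the timer while it
+ * runs and releases it once it has finished:
+ *
+ *   spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *   kbase_as_poking_timer_retain_atom(kbdev, kctx, katom);
+ *   spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *   ... the atom runs on the GPU ...
+ *   kbase_as_poking_timer_release_atom(kbdev, kctx, katom);   (may sleep)
+ */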
+
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!kctx) {
+ dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Suprious IRQ or SW Design Error?\n",
+ kbase_as_has_bus_fault(as) ? "Bus error" : "Page fault",
+ as->number, as->fault_addr);
+
+ /* Since no ctx was found, the MMU must be disabled. */
+ WARN_ON(as->current_setup.transtab);
+
+ if (kbase_as_has_bus_fault(as)) {
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ } else if (kbase_as_has_page_fault(as)) {
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_as_has_bus_fault(as) &&
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ bool reset_status;
+ /*
+ * Reset the GPU, like in bus_fault_worker, in case an
+ * earlier error hasn't been properly cleared by this
+ * point.
+ */
+ dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+ reset_status = kbase_prepare_to_reset_gpu_locked(kbdev);
+ if (reset_status)
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+ return;
+ }
+
+ if (kbase_as_has_bus_fault(as)) {
+ /*
+ * hw counters dumping in progress, signal the
+ * other thread that it failed
+ */
+ if ((kbdev->hwcnt.kctx == kctx) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING))
+ kbdev->hwcnt.backend.state =
+ KBASE_INSTR_STATE_FAULT;
+
+ /*
+ * Stop the kctx from submitting more jobs and cause it
+ * to be scheduled out/rescheduled when all references
+ * to it are released
+ */
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ dev_warn(kbdev->dev,
+ "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
+ as->number, as->fault_addr,
+ as->fault_extra_addr);
+ else
+ dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+ as->number, as->fault_addr);
+
+ /*
+ * We need to switch to UNMAPPED mode - but we do this in a
+ * worker so that we can sleep
+ */
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
+ WARN_ON(work_pending(&as->work_busfault));
+ queue_work(as->pf_wq, &as->work_busfault);
+ atomic_inc(&kbdev->faults_pending);
+ } else {
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
+ WARN_ON(work_pending(&as->work_pagefault));
+ queue_work(as->pf_wq, &as->work_pagefault);
+ atomic_inc(&kbdev->faults_pending);
+ }
+}
+
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
+{
+ int i;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ struct kbase_as *as = &kbdev->as[i];
+
+ flush_workqueue(as->pf_wq);
+ }
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mmu_hw.h b/drivers/gpu/arm_gpu/mali_kbase_mmu_hw.h
new file mode 100644
index 000000000000..986e959e9a0c
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mmu_hw.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Interface file for accessing MMU hardware functionality
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_page MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_intro_sec Introduction
+ * This module provides an abstraction for accessing the functionality provided
+ * by the Midgard MMU, keeping all MMU HW access in one common place and
+ * allowing different backends (implementations) to be provided.
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_H_
+#define _MALI_KBASE_MMU_HW_H_
+
+/* Forward declarations */
+struct kbase_device;
+struct kbase_as;
+struct kbase_context;
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw MMU access APIs
+ * @{
+ */
+
+/** @brief MMU fault type descriptor.
+ */
+enum kbase_mmu_fault_type {
+ KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
+ KBASE_MMU_FAULT_TYPE_PAGE,
+ KBASE_MMU_FAULT_TYPE_BUS,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
+};
+
+/** @brief Configure an address space for use.
+ *
+ * Configure the MMU using the address space details setup in the
+ * @ref kbase_context structure.
+ *
+ * @param[in] kbdev kbase device to configure.
+ * @param[in] as address space to configure.
+ * @param[in] kctx kbase context to configure.
+ */
+void kbase_mmu_hw_configure(struct kbase_device *kbdev,
+ struct kbase_as *as, struct kbase_context *kctx);
+
+/** @brief Issue an operation to the MMU.
+ *
+ * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
+ * is associated with the provided @ref kbase_context over the specified range
+ *
+ * @param[in] kbdev kbase device to issue the MMU operation on.
+ * @param[in] as address space to issue the MMU operation on.
+ * @param[in] kctx kbase context to issue the MMU operation on.
+ * @param[in] vpfn MMU Virtual Page Frame Number to start the
+ * operation on.
+ * @param[in] nr Number of pages to work on.
+ * @param[in] type Operation type (written to ASn_COMMAND).
+ * @param[in] handling_irq Is this operation being called during the handling
+ * of an interrupt?
+ *
+ * @return Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type,
+ unsigned int handling_irq);
+
+/** @brief Clear a fault that has been previously reported by the MMU.
+ *
+ * Clear a bus error or page fault that has been reported by the MMU.
+ *
+ * @param[in] kbdev kbase device to clear the fault from.
+ * @param[in] as address space to clear the fault from.
+ * @param[in] kctx kbase context to clear the fault from or NULL.
+ * @param[in] type The type of fault that needs to be cleared.
+ */
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+
+/** @brief Re-enable a fault that has previously been reported by the MMU.
+ *
+ * After a page fault or bus error has been reported by the MMU, further
+ * reporting of that fault type is disabled. Once the fault has been handled,
+ * this function must be called to re-enable page fault or bus error reporting.
+ *
+ * @param[in] kbdev kbase device to again enable the fault from.
+ * @param[in] as address space to again enable the fault from.
+ * @param[in] kctx kbase context to again enable the fault from.
+ * @param[in] type The type of fault that needs to be enabled again.
+ */
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type);
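+
+/*
+ * Example usage (an illustrative sketch, mirroring bus_fault_worker() in
+ * mali_kbase_mmu.c): once a fault has been handled, it is first cleared and
+ * then re-enabled for the same address space:
+ *
+ *   kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ *                            KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ *   kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ *                             KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ */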
+
+/** @} *//* end group mali_kbase_mmu_hw */
+/** @} *//* end group base_kbase_api */
+
+#endif /* _MALI_KBASE_MMU_HW_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mmu_mode.h b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode.h
new file mode 100644
index 000000000000..a2c38625f639
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _MALI_KBASE_MMU_MODE_
+#define _MALI_KBASE_MMU_MODE_
+
+#include <linux/types.h>
+#include <mali_kbase.h>
+
+/* Forward declarations */
+struct kbase_context;
+struct kbase_device;
+struct kbase_as;
+struct kbase_mmu_setup;
+
+struct kbase_mmu_mode {
+ void (*update)(struct kbase_context *kctx);
+ void (*get_as_setup)(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup);
+ void (*disable_as)(struct kbase_device *kbdev, int as_nr);
+ phys_addr_t (*pte_to_phy_addr)(u64 entry);
+ int (*ate_is_valid)(u64 ate, unsigned int level);
+ int (*pte_is_valid)(u64 pte);
+ void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
+ unsigned long flags, unsigned int level);
+ void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
+ void (*entry_invalidate)(u64 *entry);
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void);
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
+
+#endif /* _MALI_KBASE_MMU_MODE_ */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_aarch64.c b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_aarch64.c
new file mode 100644
index 000000000000..eccb33adb888
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_aarch64.c
@@ -0,0 +1,212 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include "mali_kbase_mmu_mode.h"
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+
+#define ENTRY_TYPE_MASK 3ULL
+/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0).
+ * Valid ATE entries at level 3 are flagged with the value 3.
+ * Valid ATE entries at level 0-2 are flagged with the value 1.
+ */
+#define ENTRY_IS_ATE_L3 3ULL
+#define ENTRY_IS_ATE_L02 1ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
+#define ENTRY_ACCESS_RW (1ULL << 6) /* bits 6:7 */
+#define ENTRY_ACCESS_RO (3ULL << 6)
+#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#ifdef CONFIG_64BIT
+ *pte = phy;
+#elif defined(CONFIG_ARM)
+ /*
+ * In order to prevent the compiler keeping cached copies of
+ * memory, we have to explicitly say that we have updated memory.
+ *
+ * Note: We could manually move the data ourselves into R0 and
+ * R1 by specifying register variables that are explicitly
+ * given registers assignments, the down side of this is that
+ * we have to assume cpu endianness. To avoid this we can use
+ * the ldrd to read the data from memory into R0 and R1 which
+ * will respect the cpu endianness, we then use strd to make
+ * the 64 bit assignment to the page table entry.
+ */
+ asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
+ "strd r0, r1, [%[pte]]\n\t"
+ : "=m" (*pte)
+ : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
+ : "r0", "r1");
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup)
+{
+ /* Set up the required caching policies at the correct indices
+ * in the memattr register.
+ */
+ setup->memattr =
+ (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+ (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
+ (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (AS_MEMATTR_WRITE_ALLOC <<
+ (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
+ (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_WA <<
+ (AS_MEMATTR_INDEX_OUTER_WA * 8));
+
+ setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK;
+ setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+}
+
+static void mmu_update(struct kbase_context *kctx)
+{
+ struct kbase_device * const kbdev = kctx->kbdev;
+ struct kbase_as * const as = &kbdev->as[kctx->as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ mmu_get_as_setup(kctx, current_setup);
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, kctx);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_as * const as = &kbdev->as[as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ current_setup->transtab = 0ULL;
+ current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, NULL);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+ if (!(entry & 1))
+ return 0;
+
+ return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, unsigned int level)
+{
+ if (level == MIDGARD_MMU_BOTTOMLEVEL)
+ return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L3);
+ else
+ return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L02);
+}
+
+static int pte_is_valid(u64 pte)
+{
+ return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+ u64 mmu_flags;
+
+ /* store the mem_attr index in bits 4:2 (the macro ensures the value already fits in 3 bits) */
+ mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+ /* Set access flags - note that AArch64 stage 1 does not support
+ * write-only access, so we use read/write instead
+ */
+ if (flags & KBASE_REG_GPU_WR)
+ mmu_flags |= ENTRY_ACCESS_RW;
+ else if (flags & KBASE_REG_GPU_RD)
+ mmu_flags |= ENTRY_ACCESS_RO;
+
+ /* nx if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+ if (flags & KBASE_REG_SHARE_BOTH) {
+ /* inner and outer shareable */
+ mmu_flags |= SHARE_BOTH_BITS;
+ } else if (flags & KBASE_REG_SHARE_IN) {
+ /* inner shareable coherency */
+ mmu_flags |= SHARE_INNER_BITS;
+ }
+
+ return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+ struct tagged_addr phy,
+ unsigned long flags,
+ unsigned int level)
+{
+ if (level == MIDGARD_MMU_BOTTOMLEVEL)
+ page_table_entry_set(entry, as_phys_addr_t(phy) |
+ get_mmu_flags(flags) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3);
+ else
+ page_table_entry_set(entry, as_phys_addr_t(phy) |
+ get_mmu_flags(flags) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+ page_table_entry_set(entry, (phy & PAGE_MASK) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+ page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const aarch64_mode = {
+ .update = mmu_update,
+ .get_as_setup = mmu_get_as_setup,
+ .disable_as = mmu_disable_as,
+ .pte_to_phy_addr = pte_to_phy_addr,
+ .ate_is_valid = ate_is_valid,
+ .pte_is_valid = pte_is_valid,
+ .entry_set_ate = entry_set_ate,
+ .entry_set_pte = entry_set_pte,
+ .entry_invalidate = entry_invalidate
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
+{
+ return &aarch64_mode;
+}
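+
+/*
+ * A minimal sketch (assumed device-setup code, not shown in this patch) of how
+ * a driver might choose between this AArch64 format and the LPAE format below:
+ *
+ *   kbdev->mmu_mode = kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU) ?
+ *           kbase_mmu_mode_get_aarch64() : kbase_mmu_mode_get_lpae();
+ */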
diff --git a/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_lpae.c
new file mode 100644
index 000000000000..5500127b74c8
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_mmu_mode_lpae.c
@@ -0,0 +1,200 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include "mali_kbase_mmu_mode.h"
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+
+#define ENTRY_TYPE_MASK 3ULL
+#define ENTRY_IS_ATE 1ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
+#define ENTRY_RD_BIT (1ULL << 6)
+#define ENTRY_WR_BIT (1ULL << 7)
+#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \
+ ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#ifdef CONFIG_64BIT
+ *pte = phy;
+#elif defined(CONFIG_ARM)
+ /*
+ * In order to prevent the compiler keeping cached copies of
+ * memory, we have to explicitly say that we have updated
+ * memory.
+ *
+ * Note: We could manually move the data ourselves into R0 and
+ * R1 by specifying register variables that are explicitly
+ * given registers assignments, the down side of this is that
+ * we have to assume cpu endianness. To avoid this we can use
+ * the ldrd to read the data from memory into R0 and R1 which
+ * will respect the cpu endianness, we then use strd to make
+ * the 64 bit assignment to the page table entry.
+ */
+ asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
+ "strd r0, r1, [%[pte]]\n\t"
+ : "=m" (*pte)
+ : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
+ : "r0", "r1");
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup)
+{
+ /* Set up the required caching policies at the correct indices
+ * in the memattr register. */
+ setup->memattr =
+ (AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY <<
+ (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL <<
+ (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (AS_MEMATTR_LPAE_WRITE_ALLOC <<
+ (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (AS_MEMATTR_LPAE_OUTER_IMPL_DEF <<
+ (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (AS_MEMATTR_LPAE_OUTER_WA <<
+ (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+ 0; /* The other indices are unused for now */
+
+ setup->transtab = ((u64)kctx->pgd &
+ ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) |
+ AS_TRANSTAB_LPAE_ADRMODE_TABLE |
+ AS_TRANSTAB_LPAE_READ_INNER;
+
+ setup->transcfg = 0;
+}
+
+static void mmu_update(struct kbase_context *kctx)
+{
+ struct kbase_device * const kbdev = kctx->kbdev;
+ struct kbase_as * const as = &kbdev->as[kctx->as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ mmu_get_as_setup(kctx, current_setup);
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, kctx);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_as * const as = &kbdev->as[as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED;
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, NULL);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+ if (!(entry & 1))
+ return 0;
+
+ return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, unsigned int level)
+{
+ return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE);
+}
+
+static int pte_is_valid(u64 pte)
+{
+ return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+ u64 mmu_flags;
+
+ /* store the mem_attr index in bits 4:2 (the macro ensures the value already fits in 3 bits) */
+ mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+ /* write perm if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0;
+ /* read perm if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_RD) ? ENTRY_RD_BIT : 0;
+ /* nx if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+ if (flags & KBASE_REG_SHARE_BOTH) {
+ /* inner and outer shareable */
+ mmu_flags |= SHARE_BOTH_BITS;
+ } else if (flags & KBASE_REG_SHARE_IN) {
+ /* inner shareable coherency */
+ mmu_flags |= SHARE_INNER_BITS;
+ }
+
+ return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+ struct tagged_addr phy,
+ unsigned long flags,
+ unsigned int level)
+{
+ page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) |
+ ENTRY_IS_ATE);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+ page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+ page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const lpae_mode = {
+ .update = mmu_update,
+ .get_as_setup = mmu_get_as_setup,
+ .disable_as = mmu_disable_as,
+ .pte_to_phy_addr = pte_to_phy_addr,
+ .ate_is_valid = ate_is_valid,
+ .pte_is_valid = pte_is_valid,
+ .entry_set_ate = entry_set_ate,
+ .entry_set_pte = entry_set_pte,
+ .entry_invalidate = entry_invalidate
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void)
+{
+ return &lpae_mode;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_platform_fake.c b/drivers/gpu/arm_gpu/mali_kbase_platform_fake.c
new file mode 100644
index 000000000000..1a44957fe44a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_platform_fake.c
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+
+/*
+ * This file is included only for type definitions and functions belonging to
+ * specific platform folders. Do not add dependencies with symbols that are
+ * defined somewhere else.
+ */
+#include <mali_kbase_config.h>
+
+#define PLATFORM_CONFIG_RESOURCE_COUNT 4
+#define PLATFORM_CONFIG_IRQ_RES_COUNT 3
+
+static struct platform_device *mali_device;
+
+#ifndef CONFIG_OF
+/**
+ * @brief Convert data in a struct kbase_io_resources to Linux-specific resources
+ *
+ * Converts the data in a struct kbase_io_resources to an array of Linux resource structures. Note that the function
+ * assumes the size of the linux_resources array is at least PLATFORM_CONFIG_RESOURCE_COUNT.
+ * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ.
+ *
+ * @param[in] io_resource Input IO resource data
+ * @param[out] linux_resources Pointer to output array of Linux resource structures
+ */
+static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources)
+{
+ if (!io_resources || !linux_resources) {
+ pr_err("%s: couldn't find proper resources\n", __func__);
+ return;
+ }
+
+ memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));
+
+ linux_resources[0].start = io_resources->io_memory_region.start;
+ linux_resources[0].end = io_resources->io_memory_region.end;
+ linux_resources[0].flags = IORESOURCE_MEM;
+
+ linux_resources[1].start = io_resources->job_irq_number;
+ linux_resources[1].end = io_resources->job_irq_number;
+ linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+ linux_resources[2].start = io_resources->mmu_irq_number;
+ linux_resources[2].end = io_resources->mmu_irq_number;
+ linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+ linux_resources[3].start = io_resources->gpu_irq_number;
+ linux_resources[3].end = io_resources->gpu_irq_number;
+ linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+}
+#endif /* CONFIG_OF */
+
+int kbase_platform_fake_register(void)
+{
+ struct kbase_platform_config *config;
+#ifndef CONFIG_OF
+ struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT];
+#endif
+ int err;
+
+ config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */
+ if (config == NULL) {
+ pr_err("%s: couldn't get platform config\n", __func__);
+ return -ENODEV;
+ }
+
+ mali_device = platform_device_alloc("mali", 0);
+ if (mali_device == NULL)
+ return -ENOMEM;
+
+#ifndef CONFIG_OF
+ kbasep_config_parse_io_resources(config->io_resources, resources);
+ err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT);
+ if (err) {
+ platform_device_put(mali_device);
+ mali_device = NULL;
+ return err;
+ }
+#endif /* CONFIG_OF */
+
+ err = platform_device_add(mali_device);
+ if (err) {
+ platform_device_unregister(mali_device);
+ mali_device = NULL;
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kbase_platform_fake_register);
+
+void kbase_platform_fake_unregister(void)
+{
+ if (mali_device)
+ platform_device_unregister(mali_device);
+}
+EXPORT_SYMBOL(kbase_platform_fake_unregister);
+
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_pm.c b/drivers/gpu/arm_gpu/mali_kbase_pm.c
new file mode 100644
index 000000000000..97d543464c28
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_pm.c
@@ -0,0 +1,205 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_pm.c
+ * Base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_vinstr.h>
+
+#include <mali_kbase_pm.h>
+
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
+{
+ return kbase_hwaccess_pm_powerup(kbdev, flags);
+}
+
+void kbase_pm_halt(struct kbase_device *kbdev)
+{
+ kbase_hwaccess_pm_halt(kbdev);
+}
+
+void kbase_pm_context_active(struct kbase_device *kbdev)
+{
+ (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
+}
+
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int c;
+ int old_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ /* Trace timeline information about how long it took to handle the decision
+ * to power up. Sometimes the event might be missed because the count is read
+ * outside of the mutex, but this is necessary to get the trace timing
+ * correct. */
+ old_count = kbdev->pm.active_count;
+ if (old_count == 0)
+ kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+ if (kbase_pm_is_suspending(kbdev)) {
+ switch (suspend_handler) {
+ case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
+ if (kbdev->pm.active_count != 0)
+ break;
+ /* FALLTHROUGH */
+ case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+ return 1;
+
+ case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
+ /* FALLTHROUGH */
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
+ break;
+ }
+ }
+ c = ++kbdev->pm.active_count;
+ KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
+
+ /* Trace the event being handled */
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+
+ if (c == 1)
+ /* First context active: Power on the GPU and any cores requested by
+ * the policy */
+ kbase_hwaccess_pm_gpu_active(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_active);
+
+void kbase_pm_context_idle(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int c;
+ int old_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ /* Trace timeline information about how long it took to handle the decision
+ * to power down. Sometimes the event might be missed because the count is
+ * read outside of the mutex, but this is necessary to get the trace timing
+ * correct. */
+ old_count = kbdev->pm.active_count;
+ if (old_count == 0)
+ kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ c = --kbdev->pm.active_count;
+ KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
+
+ KBASE_DEBUG_ASSERT(c >= 0);
+
+ /* Trace the event being handled */
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
+
+ if (c == 0) {
+ /* Last context has gone idle */
+ kbase_hwaccess_pm_gpu_idle(kbdev);
+
+ /* Wake up anyone waiting for this to become 0 (e.g. suspend). The
+ * waiters must synchronize with us by locking the pm.lock after
+ * waiting */
+ wake_up(&kbdev->pm.zero_active_count_wait);
+ }
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
+
+void kbase_pm_suspend(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Suspend vinstr.
+ * This call will block until vinstr is suspended. */
+ kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+ mutex_lock(&kbdev->pm.lock);
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+ kbdev->pm.suspending = true;
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* From now on, the active count will drop towards zero. Sometimes, it'll
+ * go up briefly before going down again. However, once it reaches zero it
+ * will stay there - guaranteeing that we've idled all pm references */
+
+ /* Suspend job scheduler and associated components, so that it releases all
+ * the PM active count references */
+ kbasep_js_suspend(kbdev);
+
+ /* Wait for the active count to reach zero. This is not the same as
+ * waiting for a power down, since not all policies power down when this
+ * reaches zero. */
+ wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
+
+ /* NOTE: We synchronize with anything that was just finishing a
+ * kbase_pm_context_idle() call by locking the pm.lock below */
+
+ kbase_hwaccess_pm_suspend(kbdev);
+}
+
+void kbase_pm_resume(struct kbase_device *kbdev)
+{
+ /* MUST happen before any pm_context_active calls occur */
+ kbase_hwaccess_pm_resume(kbdev);
+
+ /* Initial active call, to power on the GPU/cores if needed */
+ kbase_pm_context_active(kbdev);
+
+ /* Resume any blocked atoms (which may cause contexts to be scheduled in
+ * and dependent atoms to run) */
+ kbase_resume_suspended_soft_jobs(kbdev);
+
+ /* Resume the Job Scheduler and associated components, and start running
+ * atoms */
+ kbasep_js_resume(kbdev);
+
+ /* Matching idle call, to power off the GPU/cores if we didn't actually
+ * need it and the policy doesn't want it on */
+ kbase_pm_context_idle(kbdev);
+
+ /* Resume vinstr operation */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+}
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_pm.h b/drivers/gpu/arm_gpu/mali_kbase_pm.h
new file mode 100644
index 000000000000..37fa2479df74
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_pm.h
@@ -0,0 +1,171 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_pm.h
+ * Power management API definitions
+ */
+
+#ifndef _KBASE_PM_H_
+#define _KBASE_PM_H_
+
+#include "mali_kbase_hwaccess_pm.h"
+
+#define PM_ENABLE_IRQS 0x01
+#define PM_HW_ISSUES_DETECT 0x02
+
+
+/** Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ *
+ * @return 0 if the power management framework was successfully initialized.
+ */
+int kbase_pm_init(struct kbase_device *kbdev);
+
+/** Power up GPU after all modules have been initialized and interrupt handlers installed.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ *
+ * @param flags Flags to pass on to kbase_pm_init_hw
+ *
+ * @return 0 if powerup was successful.
+ */
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ * Should ensure that no new interrupts are generated,
+ * but allow any currently running interrupt handlers to complete successfully.
+ * The GPU is forced off by the time this function returns, regardless of
+ * whether or not the active power policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_halt(struct kbase_device *kbdev);
+
+/** Terminate the power management framework.
+ *
+ * No power management functions may be called after this
+ * (except @ref kbase_pm_init)
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_term(struct kbase_device *kbdev);
+
+/** Increment the count of active contexts.
+ *
+ * This function should be called when a context is about to submit a job. It informs the active power policy that the
+ * GPU is going to be in use shortly and the policy is expected to start turning on the GPU.
+ *
+ * This function will block until the GPU is available.
+ *
+ * This function ASSERTS if a suspend is occurring or has occurred while it is
+ * in use. Use kbase_pm_context_active_handle_suspend() instead.
+ *
+ * @note a Suspend is only visible to Kernel threads; user-space threads in a
+ * syscall cannot witness a suspend, because they are frozen before the suspend
+ * begins.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_active(struct kbase_device *kbdev);
+
+
+/** Handler codes for doing kbase_pm_context_active_handle_suspend() */
+enum kbase_pm_suspend_handler {
+ /** A suspend is not expected/not possible - this is the same as
+ * kbase_pm_context_active() */
+ KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE,
+ /** If we're suspending, fail and don't increase the active count */
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE,
+ /** If we're suspending, succeed and allow the active count to increase iff
+ * it didn't go from 0->1 (i.e., we didn't re-activate the GPU).
+ *
+ * This should only be used when there is a bounded time on the activation
+ * (e.g. the caller guarantees it will be idled very soon afterwards) */
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE
+};
+
+/** Suspend 'safe' variant of kbase_pm_context_active()
+ *
+ * If a suspend is in progress, this allows for various different ways of
+ * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details.
+ *
+ * Returns a status code indicating whether the caller is allowed to keep the
+ * GPU active during the suspend, depending on the handler code. If the status
+ * code indicates a failure, the caller must abort whatever operation it was
+ * attempting, and potentially queue it up for after the OS has resumed.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ * @param suspend_handler The handler code for how to handle a suspend that might occur
+ * @return zero Indicates success
+ * @return non-zero Indicates failure because the system is suspending or suspended.
+ */
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler);
+
+/** Decrement the reference count of active contexts.
+ *
+ * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power
+ * policy so the calling code should ensure that it does not access the GPU's registers.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_idle(struct kbase_device *kbdev);
+
+/**
+ * Suspend the GPU and prevent any further register accesses to it from Kernel
+ * threads.
+ *
+ * This is called in response to an OS suspend event, and calls into the various
+ * kbase components to complete the suspend.
+ *
+ * @note the mechanisms used here rely on all user-space threads being frozen
+ * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up
+ * the GPU e.g. via atom submission.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Resume the GPU, allow register accesses to it, and resume running atoms on
+ * the GPU.
+ *
+ * This is called in response to an OS resume event, and calls into the various
+ * kbase components to complete the resume.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_vsync_callback - vsync callback
+ *
+ * @buffer_updated: 1 if a new frame was displayed, 0 otherwise
+ * @data: Pointer to the kbase device as returned by kbase_find_device()
+ *
+ * Callback function used to notify the power management code that a vsync has
+ * occurred on the display.
+ */
+void kbase_pm_vsync_callback(int buffer_updated, void *data);
+
+#endif /* _KBASE_PM_H_ */
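
As a usage illustration of the API declared above (illustrative only, not part of this patch): a minimal sketch of pairing the suspend-safe activation call with the matching idle call. The kbase_pm_* calls and KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE come from this header; example_submit_work() and its -EAGAIN return are hypothetical.

/* Minimal sketch, assuming it is built inside the kbase driver where
 * struct kbase_device and the declarations above are visible. */
#include <linux/errno.h>
#include "mali_kbase_pm.h"

static int example_submit_work(struct kbase_device *kbdev)
{
	/* Fail instead of re-powering the GPU if a suspend is in progress;
	 * the caller would then queue the work for after resume. */
	if (kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE))
		return -EAGAIN;

	/* ... GPU register accesses / job submission would go here ... */

	/* Every successful activation must be balanced by an idle call,
	 * after which the power policy may turn the GPU off again. */
	kbase_pm_context_idle(kbdev);

	return 0;
}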
diff --git a/drivers/gpu/arm_gpu/mali_kbase_profiling_gator_api.h b/drivers/gpu/arm_gpu/mali_kbase_profiling_gator_api.h
new file mode 100644
index 000000000000..7fb674eded37
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_profiling_gator_api.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_profiling_gator_api.h
+ * Profiling control interface used by gator/Streamline
+ */
+
+#ifndef _KBASE_PROFILING_GATOR_API_H_
+#define _KBASE_PROFILING_GATOR_API_H_
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control
+ * the frame buffer dumping and s/w counter reporting.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define FBDUMP_CONTROL_MAX (5)
+#define FBDUMP_CONTROL_MIN FBDUMP_CONTROL_ENABLE
+
+void _mali_profiling_control(u32 action, u32 value);
+
+#endif /* _KBASE_PROFILING_GATOR_API_H_ */
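
As an illustration of the control interface above (illustrative only, not part of this patch): a sketch of how a gator/Streamline integration might drive it. Only _mali_profiling_control() and the action codes come from this header; the wrapper function and the rate value of 2 are hypothetical.

#include "mali_kbase_profiling_gator_api.h"

/* Hypothetical wrapper: enable frame buffer dumping and s/w counter
 * reporting using the action codes defined above. */
static void example_enable_streamline_capture(void)
{
	_mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
	_mali_profiling_control(FBDUMP_CONTROL_RATE, 2);	/* assumed: every 2nd frame */
	_mali_profiling_control(SW_COUNTER_ENABLE, 1);
}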
diff --git a/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.c b/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.c
new file mode 100644
index 000000000000..c970650069cd
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.c
@@ -0,0 +1,130 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase.h"
+
+#include "mali_kbase_regs_history_debugfs.h"
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+#include <linux/debugfs.h>
+
+
+static int regs_history_size_get(void *data, u64 *val)
+{
+ struct kbase_io_history *const h = data;
+
+ *val = h->size;
+
+ return 0;
+}
+
+static int regs_history_size_set(void *data, u64 val)
+{
+ struct kbase_io_history *const h = data;
+
+ return kbase_io_history_resize(h, (u16)val);
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(regs_history_size_fops,
+ regs_history_size_get,
+ regs_history_size_set,
+ "%llu\n");
+
+
+/**
+ * regs_history_show - show callback for the register access history file.
+ *
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to dump all recent accesses to the GPU registers.
+ *
+ * @return 0 if the register access history was successfully printed to the
+ * debugfs entry file, failure otherwise
+ */
+static int regs_history_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_io_history *const h = sfile->private;
+ u16 i;
+ size_t iters;
+ unsigned long flags;
+
+ if (!h->enabled) {
+ seq_puts(sfile, "The register access history is disabled\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ iters = (h->size > h->count) ? h->count : h->size;
+ seq_printf(sfile, "Last %zu register accesses of %zu total:\n", iters,
+ h->count);
+ for (i = 0; i < iters; ++i) {
+ struct kbase_io_access *io =
+ &h->buf[(h->count - iters + i) % h->size];
+ char const access = (io->addr & 1) ? 'w' : 'r';
+
+ seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access,
+ (void *)(io->addr & ~0x1), io->value);
+ }
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+out:
+ return 0;
+}
+
+
+/**
+ * regs_history_open - open operation for regs_history debugfs file
+ *
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * @return 0 on success, or a negative error code on failure
+ */
+static int regs_history_open(struct inode *in, struct file *file)
+{
+ return single_open(file, &regs_history_show, in->i_private);
+}
+
+
+static const struct file_operations regs_history_fops = {
+ .open = &regs_history_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_bool("regs_history_enabled", S_IRUGO | S_IWUSR,
+ kbdev->mali_debugfs_directory,
+ &kbdev->io_history.enabled);
+ debugfs_create_file("regs_history_size", S_IRUGO | S_IWUSR,
+ kbdev->mali_debugfs_directory,
+ &kbdev->io_history, &regs_history_size_fops);
+ debugfs_create_file("regs_history", S_IRUGO,
+ kbdev->mali_debugfs_directory, &kbdev->io_history,
+ &regs_history_fops);
+}
+
+
+#endif /* defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) */
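
For clarity only (not part of this patch), a small sketch of how one recorded entry is interpreted, mirroring regs_history_show() above. The struct kbase_io_access layout (addr, value) is assumed from the kbase headers, and the helper name is hypothetical.

/* Bit 0 of the recorded address encodes the direction of the access
 * ('w' for write, 'r' for read); the remaining bits are the register
 * address, and value holds the data written or read. */
static void example_decode_io_access(const struct kbase_io_access *io)
{
	char access = (io->addr & 1) ? 'w' : 'r';
	void *reg = (void *)(io->addr & ~(uintptr_t)1);

	pr_debug("%c: reg %p val %08x\n", access, reg, io->value);
}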
diff --git a/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.h b/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.h
new file mode 100644
index 000000000000..f10837002330
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_regs_history_debugfs.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Header file for register access history support via debugfs
+ *
+ * This interface is made available via /sys/kernel/debug/mali#/regs_history*.
+ *
+ * Usage:
+ * - regs_history_enabled: whether recording of register accesses is enabled.
+ * Write 'y' to enable, 'n' to disable.
+ * - regs_history_size: size of the register history buffer, must be > 0
+ * - regs_history: dump information about the most recent register accesses.
+ */
+
+#ifndef _KBASE_REGS_HISTORY_DEBUGFS_H
+#define _KBASE_REGS_HISTORY_DEBUGFS_H
+
+struct kbase_device;
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/**
+ * kbasep_regs_history_debugfs_init - add debugfs entries for register history
+ *
+ * @kbdev: Pointer to kbase_device containing the register history
+ */
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbasep_regs_history_debugfs_init CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /*_KBASE_REGS_HISTORY_DEBUGFS_H*/
diff --git a/drivers/gpu/arm_gpu/mali_kbase_replay.c b/drivers/gpu/arm_gpu/mali_kbase_replay.c
new file mode 100644
index 000000000000..2f8eccfc1757
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_replay.c
@@ -0,0 +1,1166 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_replay.c
+ * Replay soft job handlers
+ */
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mem_linux.h>
+
+#define JOB_NOT_STARTED 0
+#define JOB_TYPE_NULL (1)
+#define JOB_TYPE_VERTEX (5)
+#define JOB_TYPE_TILER (7)
+#define JOB_TYPE_FUSED (8)
+#define JOB_TYPE_FRAGMENT (9)
+
+#define JOB_HEADER_32_FBD_OFFSET (31*4)
+#define JOB_HEADER_64_FBD_OFFSET (44*4)
+
+#define FBD_POINTER_MASK (~0x3f)
+
+#define SFBD_TILER_OFFSET (48*4)
+
+#define MFBD_TILER_OFFSET (14*4)
+
+#define FBD_HIERARCHY_WEIGHTS 8
+#define FBD_HIERARCHY_MASK_MASK 0x1fff
+
+#define FBD_TYPE 1
+
+#define HIERARCHY_WEIGHTS 13
+
+#define JOB_HEADER_ID_MAX 0xffff
+
+#define JOB_SOURCE_ID(status) (((status) >> 16) & 0xFFFF)
+#define JOB_POLYGON_LIST (0x03)
+
+struct fragment_job {
+ struct job_descriptor_header header;
+
+ u32 x[2];
+ union {
+ u64 _64;
+ u32 _32;
+ } fragment_fbd;
+};
+
+static void dump_job_head(struct kbase_context *kctx, char *head_str,
+ struct job_descriptor_header *job)
+{
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
+ dev_dbg(kctx->kbdev->dev,
+ "addr = %p\n"
+ "exception_status = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n"
+ "first_incomplete_task = %x\n"
+ "fault_pointer = %llx\n"
+ "job_descriptor_size = %x\n"
+ "job_type = %x\n"
+ "job_barrier = %x\n"
+ "_reserved_01 = %x\n"
+ "_reserved_02 = %x\n"
+ "_reserved_03 = %x\n"
+ "_reserved_04/05 = %x,%x\n"
+ "job_index = %x\n"
+ "dependencies = %x,%x\n",
+ job, job->exception_status,
+ JOB_SOURCE_ID(job->exception_status),
+ (job->exception_status >> 8) & 0x3,
+ job->exception_status & 0xFF,
+ job->first_incomplete_task,
+ job->fault_pointer, job->job_descriptor_size,
+ job->job_type, job->job_barrier, job->_reserved_01,
+ job->_reserved_02, job->_reserved_03,
+ job->_reserved_04, job->_reserved_05,
+ job->job_index,
+ job->job_dependency_index_1,
+ job->job_dependency_index_2);
+
+ if (job->job_descriptor_size)
+ dev_dbg(kctx->kbdev->dev, "next = %llx\n",
+ job->next_job._64);
+ else
+ dev_dbg(kctx->kbdev->dev, "next = %x\n",
+ job->next_job._32);
+#endif
+}
+
+static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
+ u64 fbd_address, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight)
+{
+ struct {
+ u32 padding_1[1];
+ u32 flags;
+ u64 padding_2[2];
+ u64 heap_free_address;
+ u32 padding[8];
+ u32 weights[FBD_HIERARCHY_WEIGHTS];
+ } *fbd_tiler;
+ struct kbase_vmap_struct map;
+
+ dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+ fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
+ sizeof(*fbd_tiler), &map);
+ if (!fbd_tiler) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev,
+ "FBD tiler:\n"
+ "flags = %x\n"
+ "heap_free_address = %llx\n",
+ fbd_tiler->flags, fbd_tiler->heap_free_address);
+#endif
+ if (hierarchy_mask) {
+ u32 weights[HIERARCHY_WEIGHTS];
+ u16 old_hierarchy_mask = fbd_tiler->flags &
+ FBD_HIERARCHY_MASK_MASK;
+ int i, j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (old_hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+ weights[i] = fbd_tiler->weights[j++];
+ } else {
+ weights[i] = default_weight;
+ }
+ }
+
+
+ dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
+ old_hierarchy_mask, hierarchy_mask);
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+ dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+ i, weights[i]);
+
+ j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+ dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
+ i, weights[i], j);
+
+ fbd_tiler->weights[j++] = weights[i];
+ }
+ }
+
+ for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+ fbd_tiler->weights[j] = 0;
+
+ fbd_tiler->flags = hierarchy_mask | (1 << 16);
+ }
+
+ fbd_tiler->heap_free_address = tiler_heap_free;
+
+ dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
+ fbd_tiler->heap_free_address, fbd_tiler->flags);
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
+ u64 fbd_address, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight)
+{
+ struct kbase_vmap_struct map;
+ struct {
+ u32 padding_0;
+ u32 flags;
+ u64 padding_1[2];
+ u64 heap_free_address;
+ u64 padding_2;
+ u32 weights[FBD_HIERARCHY_WEIGHTS];
+ } *fbd_tiler;
+
+ dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+ fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
+ sizeof(*fbd_tiler), &map);
+ if (!fbd_tiler) {
+ dev_err(kctx->kbdev->dev,
+ "kbasep_replay_reset_fbd: failed to map fbd\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
+ "flags = %x\n"
+ "heap_free_address = %llx\n",
+ fbd_tiler->flags,
+ fbd_tiler->heap_free_address);
+#endif
+ if (hierarchy_mask) {
+ u32 weights[HIERARCHY_WEIGHTS];
+ u16 old_hierarchy_mask = (fbd_tiler->flags) &
+ FBD_HIERARCHY_MASK_MASK;
+ int i, j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (old_hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+ weights[i] = fbd_tiler->weights[j++];
+ } else {
+ weights[i] = default_weight;
+ }
+ }
+
+
+ dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
+ old_hierarchy_mask, hierarchy_mask);
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+ dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+ i, weights[i]);
+
+ j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+ dev_dbg(kctx->kbdev->dev,
+ " Writing hierarchy level %02d (%08x) to %d\n",
+ i, weights[i], j);
+
+ fbd_tiler->weights[j++] = weights[i];
+ }
+ }
+
+ for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+ fbd_tiler->weights[j] = 0;
+
+ fbd_tiler->flags = hierarchy_mask | (1 << 16);
+ }
+
+ fbd_tiler->heap_free_address = tiler_heap_free;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of an FBD pointed to by a tiler job
+ *
+ * This performs two functions:
+ * - Set the hierarchy mask
+ * - Reset the tiler free heap address
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] job_header Address of job header to reset.
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] job_64 true if this job is using 64-bit
+ * descriptors
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
+ u64 job_header, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight, bool job_64)
+{
+ struct kbase_vmap_struct map;
+ u64 fbd_address;
+
+ if (job_64) {
+ u64 *job_ext;
+
+ job_ext = kbase_vmap(kctx,
+ job_header + JOB_HEADER_64_FBD_OFFSET,
+ sizeof(*job_ext), &map);
+
+ if (!job_ext) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+ return -EINVAL;
+ }
+
+ fbd_address = *job_ext;
+
+ kbase_vunmap(kctx, &map);
+ } else {
+ u32 *job_ext;
+
+ job_ext = kbase_vmap(kctx,
+ job_header + JOB_HEADER_32_FBD_OFFSET,
+ sizeof(*job_ext), &map);
+
+ if (!job_ext) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+ return -EINVAL;
+ }
+
+ fbd_address = *job_ext;
+
+ kbase_vunmap(kctx, &map);
+ }
+
+ if (fbd_address & FBD_TYPE) {
+ return kbasep_replay_reset_mfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight);
+ } else {
+ return kbasep_replay_reset_sfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight);
+ }
+}
+
+/**
+ * @brief Reset the status of a job
+ *
+ * This performs the following functions:
+ *
+ * - Reset the Job Status field of each job to NOT_STARTED.
+ * - Set the Job Type field of any Vertex Jobs to Null Job.
+ * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
+ * the tiler_heap_free parameter, and set the hierarchy level mask to the
+ * hier_mask parameter.
+ * - Offset HW dependencies by the hw_job_id_offset parameter
+ * - Set the Perform Job Barrier flag if this job is the first in the chain
+ * - Read the address of the next job header
+ *
+ * @param[in] kctx Context pointer
+ * @param[in,out] job_header Address of job header to reset. Set to address
+ * of next job header on exit.
+ * @param[in] prev_jc Previous job chain to link to, if this job is
+ * the last in the chain.
+ * @param[in] hw_job_id_offset Offset for HW job IDs
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] first_in_chain true if this job is the first in the chain
+ * @param[in] fragment_chain true if this job is in the fragment chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_job(struct kbase_context *kctx,
+ u64 *job_header, u64 prev_jc,
+ u64 tiler_heap_free, u16 hierarchy_mask,
+ u32 default_weight, u16 hw_job_id_offset,
+ bool first_in_chain, bool fragment_chain)
+{
+ struct fragment_job *frag_job;
+ struct job_descriptor_header *job;
+ u64 new_job_header;
+ struct kbase_vmap_struct map;
+
+ frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map);
+ if (!frag_job) {
+ dev_err(kctx->kbdev->dev,
+ "kbasep_replay_parse_jc: failed to map jc\n");
+ return -EINVAL;
+ }
+ job = &frag_job->header;
+
+ dump_job_head(kctx, "Job header:", job);
+
+ if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) {
+ dev_err(kctx->kbdev->dev, "Job already not started\n");
+ goto out_unmap;
+ }
+ job->exception_status = JOB_NOT_STARTED;
+
+ if (job->job_type == JOB_TYPE_VERTEX)
+ job->job_type = JOB_TYPE_NULL;
+
+ if (job->job_type == JOB_TYPE_FUSED) {
+ dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
+ goto out_unmap;
+ }
+
+ if (first_in_chain)
+ job->job_barrier = 1;
+
+ if ((job->job_dependency_index_1 + hw_job_id_offset) >
+ JOB_HEADER_ID_MAX ||
+ (job->job_dependency_index_2 + hw_job_id_offset) >
+ JOB_HEADER_ID_MAX ||
+ (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
+ dev_err(kctx->kbdev->dev,
+				"Job indices/dependencies out of valid range\n");
+ goto out_unmap;
+ }
+
+ if (job->job_dependency_index_1)
+ job->job_dependency_index_1 += hw_job_id_offset;
+ if (job->job_dependency_index_2)
+ job->job_dependency_index_2 += hw_job_id_offset;
+
+ job->job_index += hw_job_id_offset;
+
+ if (job->job_descriptor_size) {
+ new_job_header = job->next_job._64;
+ if (!job->next_job._64)
+ job->next_job._64 = prev_jc;
+ } else {
+ new_job_header = job->next_job._32;
+ if (!job->next_job._32)
+ job->next_job._32 = prev_jc;
+ }
+ dump_job_head(kctx, "Updated to:", job);
+
+ if (job->job_type == JOB_TYPE_TILER) {
+ bool job_64 = job->job_descriptor_size != 0;
+
+ if (kbasep_replay_reset_tiler_job(kctx, *job_header,
+ tiler_heap_free, hierarchy_mask,
+ default_weight, job_64) != 0)
+ goto out_unmap;
+
+ } else if (job->job_type == JOB_TYPE_FRAGMENT) {
+ u64 fbd_address;
+
+ if (job->job_descriptor_size)
+ fbd_address = frag_job->fragment_fbd._64;
+ else
+ fbd_address = (u64)frag_job->fragment_fbd._32;
+
+ if (fbd_address & FBD_TYPE) {
+ if (kbasep_replay_reset_mfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight) != 0)
+ goto out_unmap;
+ } else {
+ if (kbasep_replay_reset_sfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight) != 0)
+ goto out_unmap;
+ }
+ }
+
+ kbase_vunmap(kctx, &map);
+
+ *job_header = new_job_header;
+
+ return 0;
+
+out_unmap:
+ kbase_vunmap(kctx, &map);
+ return -EINVAL;
+}
+
+/**
+ * @brief Find the highest job ID in a job chain
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] jc Job chain start address
+ * @param[out] hw_job_id Highest job ID in chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
+ u64 jc, u16 *hw_job_id)
+{
+ while (jc) {
+ struct job_descriptor_header *job;
+ struct kbase_vmap_struct map;
+
+ dev_dbg(kctx->kbdev->dev,
+ "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
+
+ job = kbase_vmap(kctx, jc, sizeof(*job), &map);
+ if (!job) {
+ dev_err(kctx->kbdev->dev, "failed to map jc\n");
+
+ return -EINVAL;
+ }
+
+ if (job->job_index > *hw_job_id)
+ *hw_job_id = job->job_index;
+
+ if (job->job_descriptor_size)
+ jc = job->next_job._64;
+ else
+ jc = job->next_job._32;
+
+ kbase_vunmap(kctx, &map);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of a number of jobs
+ *
+ * This function walks the provided job chain, and calls
+ * kbasep_replay_reset_job for each job. It also links the job chain to the
+ * provided previous job chain.
+ *
+ * The function will fail if any of the jobs passed in already has a status of
+ * NOT_STARTED.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] jc Job chain to be processed
+ * @param[in] prev_jc Job chain to be added to. May be NULL
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] hw_job_id_offset Offset for HW job IDs
+ * @param[in] fragment_chain true if this chain is the fragment chain
+ *
+ * @return 0 on success, error code otherwise
+ */
+static int kbasep_replay_parse_jc(struct kbase_context *kctx,
+ u64 jc, u64 prev_jc,
+ u64 tiler_heap_free, u16 hierarchy_mask,
+ u32 default_weight, u16 hw_job_id_offset,
+ bool fragment_chain)
+{
+ bool first_in_chain = true;
+ int nr_jobs = 0;
+
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
+ jc, hw_job_id_offset);
+
+ while (jc) {
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
+
+ if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
+ tiler_heap_free, hierarchy_mask,
+ default_weight, hw_job_id_offset,
+ first_in_chain, fragment_chain) != 0)
+ return -EINVAL;
+
+ first_in_chain = false;
+
+ nr_jobs++;
+ if (fragment_chain &&
+ nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
+ dev_err(kctx->kbdev->dev,
+ "Exceeded maximum number of jobs in fragment chain\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of a replay job, and set up dependencies
+ *
+ * This performs the actions to allow the replay job to be re-run following
+ * completion of the passed dependency.
+ *
+ * @param[in] katom The atom to be reset
+ * @param[in] dep_atom The dependency to be attached to the atom
+ */
+static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
+ struct kbase_jd_atom *dep_atom)
+{
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
+ list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
+}
+
+/**
+ * @brief Allocate an unused katom
+ *
+ * This will search the provided context for an unused katom, and will mark it
+ * as KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * If no atoms are available then the function will fail.
+ *
+ * @param[in] kctx Context pointer
+ * @return An atom ID, or -1 on failure
+ */
+static int kbasep_allocate_katom(struct kbase_context *kctx)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int i;
+
+ for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
+ if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
+ jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
+ dev_dbg(kctx->kbdev->dev,
+ "kbasep_allocate_katom: Allocated atom %d\n",
+ i);
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * @brief Release a katom
+ *
+ * This will mark the provided atom as available, and remove any dependencies.
+ *
+ * For use on error path.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom_id ID of atom to release
+ */
+static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+
+ dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
+ atom_id);
+
+ while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
+ list_del(jctx->atoms[atom_id].dep_head[0].next);
+
+ while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
+ list_del(jctx->atoms[atom_id].dep_head[1].next);
+
+ jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
+}
+
+static void kbasep_replay_create_atom(struct kbase_context *kctx,
+ struct base_jd_atom_v2 *atom,
+ int atom_nr,
+ base_jd_prio prio)
+{
+ atom->nr_extres = 0;
+ atom->extres_list = 0;
+ atom->device_nr = 0;
+ atom->prio = prio;
+ atom->atom_number = atom_nr;
+
+ base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID);
+ base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID);
+
+ atom->udata.blob[0] = 0;
+ atom->udata.blob[1] = 0;
+}
+
+/**
+ * @brief Create two atoms for the purpose of replaying jobs
+ *
+ * Two atoms are allocated and created. The jc pointer is not set at this
+ * stage. The second atom has a dependency on the first. The remaining fields
+ * are set up as follows:
+ *
+ * - No external resources. Any required external resources will be held by the
+ * replay atom.
+ * - device_nr is set to 0. This is not relevant as
+ * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ *
+ * @param[in] kctx Context pointer
+ * @param[out] t_atom Atom to use for tiler jobs
+ * @param[out] f_atom Atom to use for fragment jobs
+ * @param[in] prio Priority of new atom (inherited from replay soft
+ * job)
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_create_atoms(struct kbase_context *kctx,
+ struct base_jd_atom_v2 *t_atom,
+ struct base_jd_atom_v2 *f_atom,
+ base_jd_prio prio)
+{
+ int t_atom_nr, f_atom_nr;
+
+ t_atom_nr = kbasep_allocate_katom(kctx);
+ if (t_atom_nr < 0) {
+ dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+ return -EINVAL;
+ }
+
+ f_atom_nr = kbasep_allocate_katom(kctx);
+ if (f_atom_nr < 0) {
+ dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+ kbasep_release_katom(kctx, t_atom_nr);
+ return -EINVAL;
+ }
+
+ kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
+ kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
+
+ base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA);
+
+ return 0;
+}
+
+#ifdef CONFIG_MALI_DEBUG
+static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
+{
+ u64 next;
+
+ dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
+ next = payload->tiler_jc_list;
+
+ while (next) {
+ struct kbase_vmap_struct map;
+ base_jd_replay_jc *jc_struct;
+
+ jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);
+
+ if (!jc_struct)
+ return;
+
+ dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
+ jc_struct, jc_struct->jc, jc_struct->next);
+
+ next = jc_struct->next;
+
+ kbase_vunmap(kctx, &map);
+ }
+}
+#endif
+
+/**
+ * @brief Parse a base_jd_replay_payload provided by userspace
+ *
+ * This will read the payload from userspace, and parse the job chains.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] replay_atom Replay soft job atom
+ * @param[in] t_atom Atom to use for tiler jobs
+ * @param[in] f_atom Atom to use for fragment jobs
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_parse_payload(struct kbase_context *kctx,
+ struct kbase_jd_atom *replay_atom,
+ struct base_jd_atom_v2 *t_atom,
+ struct base_jd_atom_v2 *f_atom)
+{
+ base_jd_replay_payload *payload = NULL;
+ u64 next;
+ u64 prev_jc = 0;
+ u16 hw_job_id_offset = 0;
+ int ret = -EINVAL;
+ struct kbase_vmap_struct map;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(*payload) = %zu\n",
+			replay_atom->jc, sizeof(*payload));
+
+ payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
+ if (!payload) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
+ return -EINVAL;
+ }
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+ if (KBASE_API_VERSION(10, 3) > replay_atom->kctx->api_version) {
+ base_jd_replay_payload_uk10_2 *payload_uk10_2;
+ u16 tiler_core_req;
+ u16 fragment_core_req;
+
+ payload_uk10_2 = (base_jd_replay_payload_uk10_2 *) payload;
+ memcpy(&tiler_core_req, &payload_uk10_2->tiler_core_req,
+ sizeof(tiler_core_req));
+ memcpy(&fragment_core_req, &payload_uk10_2->fragment_core_req,
+ sizeof(fragment_core_req));
+ payload->tiler_core_req = (u32)(tiler_core_req & 0x7fff);
+ payload->fragment_core_req = (u32)(fragment_core_req & 0x7fff);
+ }
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+ dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
+ "tiler_jc_list = %llx\n"
+ "fragment_jc = %llx\n"
+ "tiler_heap_free = %llx\n"
+ "fragment_hierarchy_mask = %x\n"
+ "tiler_hierarchy_mask = %x\n"
+ "hierarchy_default_weight = %x\n"
+ "tiler_core_req = %x\n"
+ "fragment_core_req = %x\n",
+ payload->tiler_jc_list,
+ payload->fragment_jc,
+ payload->tiler_heap_free,
+ payload->fragment_hierarchy_mask,
+ payload->tiler_hierarchy_mask,
+ payload->hierarchy_default_weight,
+ payload->tiler_core_req,
+ payload->fragment_core_req);
+ payload_dump(kctx, payload);
+#endif
+ t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
+ f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
+
+	/* Sanity check core requirements */
+ if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T ||
+ (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS ||
+ t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
+ f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+
+ int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP;
+ int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP & ~BASE_JD_REQ_FS_AFBC;
+ int t_has_ex_res = t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+ int f_has_ex_res = f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+
+ if (t_atom_type != BASE_JD_REQ_T) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x Expected: 0x%x\n",
+					t_atom_type, BASE_JD_REQ_T);
+ }
+ if (f_atom_type != BASE_JD_REQ_FS) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was 0x%x Expected: 0x%x\n",
+ f_atom_type, BASE_JD_REQ_FS);
+ }
+ if (t_has_ex_res) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n");
+ }
+ if (f_has_ex_res) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n");
+ }
+
+ goto out;
+ }
+
+ /* Process tiler job chains */
+ next = payload->tiler_jc_list;
+ if (!next) {
+ dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
+ goto out;
+ }
+
+ while (next) {
+ base_jd_replay_jc *jc_struct;
+ struct kbase_vmap_struct jc_map;
+ u64 jc;
+
+ jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);
+
+ if (!jc_struct) {
+ dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
+ goto out;
+ }
+
+ jc = jc_struct->jc;
+ next = jc_struct->next;
+ if (next)
+ jc_struct->jc = 0;
+
+ kbase_vunmap(kctx, &jc_map);
+
+ if (jc) {
+ u16 max_hw_job_id = 0;
+
+ if (kbasep_replay_find_hw_job_id(kctx, jc,
+ &max_hw_job_id) != 0)
+ goto out;
+
+ if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
+ payload->tiler_heap_free,
+ payload->tiler_hierarchy_mask,
+ payload->hierarchy_default_weight,
+ hw_job_id_offset, false) != 0) {
+ goto out;
+ }
+
+ hw_job_id_offset += max_hw_job_id;
+
+ prev_jc = jc;
+ }
+ }
+ t_atom->jc = prev_jc;
+
+ /* Process fragment job chain */
+ f_atom->jc = payload->fragment_jc;
+ if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
+ payload->tiler_heap_free,
+ payload->fragment_hierarchy_mask,
+ payload->hierarchy_default_weight, 0,
+ true) != 0) {
+ goto out;
+ }
+
+ if (!t_atom->jc || !f_atom->jc) {
+ dev_err(kctx->kbdev->dev, "Invalid payload\n");
+ goto out;
+ }
+
+ dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+ t_atom->jc, f_atom->jc);
+ ret = 0;
+
+out:
+ kbase_vunmap(kctx, &map);
+
+ return ret;
+}
+
+static void kbase_replay_process_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_context *kctx;
+ struct kbase_jd_context *jctx;
+ bool need_to_try_schedule_context = false;
+
+ struct base_jd_atom_v2 t_atom, f_atom;
+ struct kbase_jd_atom *t_katom, *f_katom;
+ base_jd_prio atom_prio;
+
+ katom = container_of(data, struct kbase_jd_atom, work);
+ kctx = katom->kctx;
+ jctx = &kctx->jctx;
+
+ mutex_lock(&jctx->lock);
+
+ atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);
+
+ if (kbasep_replay_create_atoms(
+ kctx, &t_atom, &f_atom, atom_prio) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ t_katom = &jctx->atoms[t_atom.atom_number];
+ f_katom = &jctx->atoms[f_atom.atom_number];
+
+ if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
+ kbasep_release_katom(kctx, t_atom.atom_number);
+ kbasep_release_katom(kctx, f_atom.atom_number);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ kbasep_replay_reset_softjob(katom, f_katom);
+
+ need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
+ if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+ dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+ kbasep_release_katom(kctx, f_atom.atom_number);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+ need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
+ if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+ dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+out:
+ if (katom->event_code != BASE_JD_EVENT_DONE) {
+ kbase_disjoint_state_down(kctx->kbdev);
+
+ need_to_try_schedule_context |= jd_done_nolock(katom, NULL);
+ }
+
+ if (need_to_try_schedule_context)
+ kbase_js_sched_all(kctx->kbdev);
+
+ mutex_unlock(&jctx->lock);
+}
+
+/**
+ * @brief Check job replay fault
+ *
+ * This reads the job payload, checks the fault type and source, and then
+ * decides whether a replay is required.
+ *
+ * @param[in] katom The atom to be processed
+ * @return true if a replay is required, false otherwise.
+ */
+static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = kctx->kbdev->dev;
+ base_jd_replay_payload *payload;
+ u64 job_header;
+ u64 job_loop_detect;
+ struct job_descriptor_header *job;
+ struct kbase_vmap_struct job_map;
+ struct kbase_vmap_struct map;
+ bool err = false;
+
+ /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
+ * if force_replay is enabled.
+ */
+ if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
+ return false;
+ } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
+ return true;
+ } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
+ katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
+ return true;
+ } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
+ /* No replay for faults of type other than
+ * BASE_JD_EVENT_DATA_INVALID_FAULT.
+ */
+ return false;
+ }
+
+ /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
+ * to find out whether the source of exception is POLYGON_LIST. Replay
+ * is required if the source of fault is POLYGON_LIST.
+ */
+ payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
+ if (!payload) {
+ dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
+ return false;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
+ dev_dbg(dev, "\nPayload structure:\n"
+ "fragment_jc = 0x%llx\n"
+ "fragment_hierarchy_mask = 0x%x\n"
+ "fragment_core_req = 0x%x\n",
+ payload->fragment_jc,
+ payload->fragment_hierarchy_mask,
+ payload->fragment_core_req);
+#endif
+ /* Process fragment job chain */
+ job_header = (u64) payload->fragment_jc;
+ job_loop_detect = job_header;
+ while (job_header) {
+ job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
+ if (!job) {
+ dev_err(dev, "failed to map jc\n");
+			/* unmap payload */
+ kbase_vunmap(kctx, &map);
+ return false;
+ }
+
+
+ dump_job_head(kctx, "\njob_head structure:\n", job);
+
+ /* Replay only when the polygon list reader caused the
+ * DATA_INVALID_FAULT */
+ if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
+ (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) {
+ err = true;
+ kbase_vunmap(kctx, &job_map);
+ break;
+ }
+
+ /* Move on to next fragment job in the list */
+ if (job->job_descriptor_size)
+ job_header = job->next_job._64;
+ else
+ job_header = job->next_job._32;
+
+ kbase_vunmap(kctx, &job_map);
+
+ /* Job chain loop detected */
+ if (job_header == job_loop_detect)
+ break;
+ }
+
+	/* unmap payload */
+ kbase_vunmap(kctx, &map);
+
+ return err;
+}
+
+
+/**
+ * @brief Process a replay job
+ *
+ * Called from kbase_process_soft_job.
+ *
+ * On exit, if the job has completed, katom->event_code will have been updated.
+ * If the job has not completed, and is replaying jobs, then the atom status
+ * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * @param[in] katom The atom to be processed
+ * @return false if the atom has completed
+ * true if the atom is replaying jobs
+ */
+bool kbase_replay_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ /* Don't replay this atom if these issues are not present in the
+ * hardware */
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
+ !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
+ dev_dbg(kbdev->dev, "Hardware does not need replay workaround");
+
+ /* Signal failure to userspace */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+
+ return false;
+ }
+
+ if (katom->event_code == BASE_JD_EVENT_DONE) {
+ dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");
+
+ if (katom->retry_count)
+ kbase_disjoint_state_down(kbdev);
+
+ return false;
+ }
+
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ dev_dbg(kbdev->dev, "Not replaying; context is dying\n");
+
+ if (katom->retry_count)
+ kbase_disjoint_state_down(kbdev);
+
+ return false;
+ }
+
+ /* Check job exception type and source before replaying. */
+ if (!kbase_replay_fault_check(katom)) {
+ dev_dbg(kbdev->dev,
+ "Replay cancelled on event %x\n", katom->event_code);
+ /* katom->event_code is already set to the failure code of the
+ * previous job.
+ */
+ return false;
+ }
+
+ dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
+ katom->retry_count);
+
+ katom->retry_count++;
+
+ if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
+ dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");
+
+ kbase_disjoint_state_down(kbdev);
+
+ /* katom->event_code is already set to the failure code of the
+ previous job */
+ return false;
+ }
+
+	/* Only enter the disjoint state once for the whole duration of the replay */
+ if (katom->retry_count == 1)
+ kbase_disjoint_state_up(kbdev);
+
+ INIT_WORK(&katom->work, kbase_replay_process_worker);
+ queue_work(kctx->event_workq, &katom->work);
+
+ return true;
+}
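
To make the fault-check logic above easier to follow (illustration only, not part of this patch), a sketch of how the exception_status word is decomposed, using the same field layout as JOB_SOURCE_ID() and dump_job_head(); the helper name is hypothetical.

/* Bits 16..31 carry the source ID (JOB_POLYGON_LIST == 0x03), bits 8..9
 * the access type and bits 0..7 the exception code, as printed by
 * dump_job_head(). kbase_replay_fault_check() replays a job chain only
 * when the polygon list reader raised the DATA_INVALID_FAULT. */
static bool example_is_polygon_list_fault(u32 exception_status)
{
	u32 source_id = JOB_SOURCE_ID(exception_status);
	u32 access = (exception_status >> 8) & 0x3;
	u32 exception = exception_status & 0xFF;

	(void)access;
	(void)exception;

	return source_id == JOB_POLYGON_LIST;
}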
diff --git a/drivers/gpu/arm_gpu/mali_kbase_smc.c b/drivers/gpu/arm_gpu/mali_kbase_smc.c
new file mode 100644
index 000000000000..43175c85988f
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_smc.c
@@ -0,0 +1,74 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+#include <mali_kbase_smc.h>
+
+#include <linux/compiler.h>
+
+static noinline u64 invoke_smc_fid(u64 function_id,
+ u64 arg0, u64 arg1, u64 arg2)
+{
+ register u64 x0 asm("x0") = function_id;
+ register u64 x1 asm("x1") = arg0;
+ register u64 x2 asm("x2") = arg1;
+ register u64 x3 asm("x3") = arg2;
+
+ asm volatile(
+ __asmeq("%0", "x0")
+ __asmeq("%1", "x1")
+ __asmeq("%2", "x2")
+ __asmeq("%3", "x3")
+ "smc #0\n"
+ : "+r" (x0)
+ : "r" (x1), "r" (x2), "r" (x3));
+
+ return x0;
+}
+
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
+{
+ /* Is fast call (bit 31 set) */
+ KBASE_DEBUG_ASSERT(fid & ~SMC_FAST_CALL);
+ /* bits 16-23 must be zero for fast calls */
+ KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);
+
+ return invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+ u64 arg0, u64 arg1, u64 arg2)
+{
+ u32 fid = 0;
+
+	/* Only the six OEN bits are allowed to be set. */
+ KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);
+
+ fid |= SMC_FAST_CALL; /* Bit 31: Fast call */
+ if (smc64)
+ fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */
+ fid |= oen; /* Bit 29:24: OEN */
+ /* Bit 23:16: Must be zero for fast calls */
+ fid |= (function_number); /* Bit 15:0: function number */
+
+ return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+#endif /* CONFIG_ARM64 */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_smc.h b/drivers/gpu/arm_gpu/mali_kbase_smc.h
new file mode 100644
index 000000000000..9bff3d2e8b4d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_smc.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_SMC_H_
+#define _KBASE_SMC_H_
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+
+#define SMC_FAST_CALL (1 << 31)
+#define SMC_64 (1 << 30)
+
+#define SMC_OEN_OFFSET 24
+#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */
+#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET)
+#define SMC_OEN_STD (4 << SMC_OEN_OFFSET)
+
+
+/**
+ * kbase_invoke_smc_fid - Perform a secure monitor call
+ * @fid: The SMC function to call, see SMC Calling convention.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC.
+ */
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2);
+
+/**
+ * kbase_invoke_smc - Perform a secure monitor call
+ * @oen: Owning Entity number (SIP, STD etc).
+ * @function_number: The function number within the OEN.
+ * @smc64: use SMC64 calling convention instead of SMC32.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC call.
+ */
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+ u64 arg0, u64 arg1, u64 arg2);
+
+#endif /* CONFIG_ARM64 */
+
+#endif /* _KBASE_SMC_H_ */
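
To make the bit layout concrete (illustration only, not part of this patch), a sketch of the function ID that kbase_invoke_smc() would assemble for a SiP-owned SMC64 fast call. The macros and kbase_invoke_smc_fid() come from this header; the function number 0x100 and the wrapper are hypothetical.

#include "mali_kbase_smc.h"

/* fid layout: bit 31 fast call, bit 30 SMC64, bits 29..24 OEN,
 * bits 23..16 zero for fast calls, bits 15..0 function number. */
static u64 example_sip_fast_call(u64 arg0, u64 arg1, u64 arg2)
{
	u32 fid = SMC_FAST_CALL | SMC_64 | SMC_OEN_SIP | 0x100;

	return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
}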
diff --git a/drivers/gpu/arm_gpu/mali_kbase_softjobs.c b/drivers/gpu/arm_gpu/mali_kbase_softjobs.c
new file mode 100644
index 000000000000..29e76a944c42
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_softjobs.c
@@ -0,0 +1,1512 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#include <asm/cacheflush.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <mali_base_kernel.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tlstream.h>
+#include <linux/version.h>
+#include <linux/ktime.h>
+#include <linux/pfn.h>
+#include <linux/sched.h>
+
+/* Mask to check cache alignment of data structures */
+#define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)
+
+/**
+ * @file mali_kbase_softjobs.c
+ *
+ * This file implements the logic behind software-only jobs that are
+ * executed within the driver rather than being handed over to the GPU.
+ */
+
+static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_del(&katom->queue);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+	/* Record the start time of this atom so we can cancel it at
+ * the right time.
+ */
+ katom->start_timestamp = ktime_get();
+
+ /* Add the atom to the waiting list before the timer is
+ * (re)started to make sure that it gets processed.
+ */
+ kbasep_add_waiting_soft_job(katom);
+
+	/* Arm the soft-job timeout timer if it is not already running */
+ if (!timer_pending(&kctx->soft_job_timeout)) {
+ int timeout_ms = atomic_read(
+ &kctx->kbdev->js_data.soft_job_timeout_ms);
+ mod_timer(&kctx->soft_job_timeout,
+ jiffies + msecs_to_jiffies(timeout_ms));
+ }
+}
+
+static int kbasep_read_soft_event_status(
+ struct kbase_context *kctx, u64 evt, unsigned char *status)
+{
+ unsigned char *mapped_evt;
+ struct kbase_vmap_struct map;
+
+ mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+ if (!mapped_evt)
+ return -EFAULT;
+
+ *status = *mapped_evt;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbasep_write_soft_event_status(
+ struct kbase_context *kctx, u64 evt, unsigned char new_status)
+{
+ unsigned char *mapped_evt;
+ struct kbase_vmap_struct map;
+
+ if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
+ (new_status != BASE_JD_SOFT_EVENT_RESET))
+ return -EINVAL;
+
+ mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+ if (!mapped_evt)
+ return -EFAULT;
+
+ *mapped_evt = new_status;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
+{
+ struct kbase_vmap_struct map;
+ void *user_result;
+ struct timespec ts;
+ struct base_dump_cpu_gpu_counters data;
+ u64 system_time;
+ u64 cycle_counter;
+ u64 jc = katom->jc;
+ struct kbase_context *kctx = katom->kctx;
+ int pm_active_err;
+
+ memset(&data, 0, sizeof(data));
+
+ /* Take the PM active reference as late as possible - otherwise, it could
+ * delay suspend until we process the atom (which may be at the end of a
+	 * long chain of dependencies) */
+ pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
+ if (pm_active_err) {
+ struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
+
+ /* We're suspended - queue this on the list of suspended jobs
+ * Use dep_item[1], because dep_item[0] was previously in use
+ * for 'waiting_soft_jobs'.
+ */
+ mutex_lock(&js_devdata->runpool_mutex);
+ list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+		/* Also add this to the list of waiting soft jobs */
+ kbasep_add_waiting_soft_job(katom);
+
+ return pm_active_err;
+ }
+
+ kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
+ &ts);
+
+ kbase_pm_context_idle(kctx->kbdev);
+
+ data.sec = ts.tv_sec;
+ data.usec = ts.tv_nsec / 1000;
+ data.system_time = system_time;
+ data.cycle_counter = cycle_counter;
+
+ /* Assume this atom will be cancelled until we know otherwise */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ /* GPU_WR access is checked on the range for returning the result to
+ * userspace for the following reasons:
+ * - security, this is currently how imported user bufs are checked.
+	 * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR */
+ user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
+ if (!user_result)
+ return 0;
+
+ memcpy(user_result, &data, sizeof(data));
+
+ kbase_vunmap(kctx, &map);
+
+ /* Atom was fine - mark it as done */
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ return 0;
+}
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/* Called by the explicit fence mechanism when a fence wait has completed */
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+ kbasep_remove_waiting_soft_job(katom);
+ kbase_finish_soft_job(katom);
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(kctx->kbdev);
+ mutex_unlock(&kctx->jctx.lock);
+}
+#endif
+
+static void kbasep_soft_event_complete_job(struct work_struct *work)
+{
+ struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+ int resched;
+
+ mutex_lock(&kctx->jctx.lock);
+ resched = jd_done_nolock(katom, NULL);
+ mutex_unlock(&kctx->jctx.lock);
+
+ if (resched)
+ kbase_js_sched_all(kctx->kbdev);
+}
+
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
+{
+ int cancel_timer = 1;
+ struct list_head *entry, *tmp;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ struct kbase_jd_atom *katom = list_entry(
+ entry, struct kbase_jd_atom, queue);
+
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ if (katom->jc == evt) {
+ list_del(&katom->queue);
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+ INIT_WORK(&katom->work,
+ kbasep_soft_event_complete_job);
+ queue_work(kctx->jctx.job_done_wq,
+ &katom->work);
+ } else {
+ /* There are still other waiting jobs, we cannot
+ * cancel the timer yet.
+ */
+ cancel_timer = 0;
+ }
+ break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ /* Keep the timer running if fence debug is enabled and
+ * there are waiting fence jobs.
+ */
+ cancel_timer = 0;
+ break;
+#endif
+ }
+ }
+
+ if (cancel_timer)
+ del_timer(&kctx->soft_job_timeout);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = kctx->kbdev->dev;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep;
+
+ list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
+ if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
+ dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
+ continue;
+
+ if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
+ /* Found blocked trigger fence. */
+ struct kbase_sync_fence_info info;
+
+ if (!kbase_sync_fence_in_info_get(dep, &info)) {
+ dev_warn(dev,
+ "\tVictim trigger atom %d fence [%p] %s: %s\n",
+ kbase_jd_atom_id(kctx, dep),
+ info.fence,
+ info.name,
+ kbase_sync_status_string(info.status));
+ }
+ }
+
+ kbase_fence_debug_check_atom(dep);
+ }
+ }
+}
+
+static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = katom->kctx->kbdev->dev;
+ int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
+ unsigned long lflags;
+ struct kbase_sync_fence_info info;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+
+ if (kbase_sync_fence_in_info_get(katom, &info)) {
+ /* Fence must have signaled just after timeout. */
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+ return;
+ }
+
+ dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
+ kctx->tgid, kctx->id,
+ kbase_jd_atom_id(kctx, katom),
+ info.fence, timeout_ms);
+ dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
+ info.fence, info.name,
+ kbase_sync_status_string(info.status));
+
+ /* Search for blocked trigger atoms */
+ kbase_fence_debug_check_atom(katom);
+
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+
+ kbase_sync_fence_in_dump(katom);
+}
+
+struct kbase_fence_debug_work {
+ struct kbase_jd_atom *katom;
+ struct work_struct work;
+};
+
+static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+{
+ struct kbase_fence_debug_work *w = container_of(work,
+ struct kbase_fence_debug_work, work);
+ struct kbase_jd_atom *katom = w->katom;
+ struct kbase_context *kctx = katom->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+ kbase_fence_debug_wait_timeout(katom);
+ mutex_unlock(&kctx->jctx.lock);
+
+ kfree(w);
+}
+
+static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_fence_debug_work *work;
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Enqueue fence debug worker. Use job_done_wq to get
+ * debug print ordered with job completion.
+ */
+ work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
+ /* Ignore allocation failure. */
+ if (work) {
+ work->katom = katom;
+ INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
+ queue_work(kctx->jctx.job_done_wq, &work->work);
+ }
+}
+#endif /* CONFIG_MALI_FENCE_DEBUG */
+
+void kbasep_soft_job_timeout_worker(unsigned long data)
+{
+ struct kbase_context *kctx = (struct kbase_context *)data;
+ u32 timeout_ms = (u32)atomic_read(
+ &kctx->kbdev->js_data.soft_job_timeout_ms);
+ struct timer_list *timer = &kctx->soft_job_timeout;
+ ktime_t cur_time = ktime_get();
+ bool restarting = false;
+ unsigned long lflags;
+ struct list_head *entry, *tmp;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ struct kbase_jd_atom *katom = list_entry(entry,
+ struct kbase_jd_atom, queue);
+ s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
+ katom->start_timestamp));
+
+ if (elapsed_time < (s64)timeout_ms) {
+ restarting = true;
+ continue;
+ }
+
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ /* Take it out of the list to ensure that it
+ * will be cancelled in all cases
+ */
+ list_del(&katom->queue);
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+ break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ kbase_fence_debug_timeout(katom);
+ break;
+#endif
+ }
+ }
+
+ if (restarting)
+ mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned char status;
+
+ /* The status of this soft-job is stored in jc */
+ if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ return 0;
+ }
+
+ if (status == BASE_JD_SOFT_EVENT_SET)
+ return 0; /* Event already set, nothing to do */
+
+ kbasep_add_waiting_with_timeout(katom);
+
+ return 1;
+}
+
+static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
+ unsigned char new_status)
+{
+ /* Complete jobs waiting on the same event */
+ struct kbase_context *kctx = katom->kctx;
+
+ if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ return;
+ }
+
+ if (new_status == BASE_JD_SOFT_EVENT_SET)
+ kbasep_complete_triggered_soft_events(kctx, katom->jc);
+}
+
+/**
+ * kbase_soft_event_update() - Update soft event state
+ * @kctx: Pointer to context
+ * @event: Event to update
+ * @new_status: New status value of event
+ *
+ * Update the event, and wake up any atoms waiting for the event.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int kbase_soft_event_update(struct kbase_context *kctx,
+ u64 event,
+ unsigned char new_status)
+{
+ int err = 0;
+
+ mutex_lock(&kctx->jctx.lock);
+
+ if (kbasep_write_soft_event_status(kctx, event, new_status)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (new_status == BASE_JD_SOFT_EVENT_SET)
+ kbasep_complete_triggered_soft_events(kctx, event);
+
+out:
+ mutex_unlock(&kctx->jctx.lock);
+
+ return err;
+}
+
+static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
+{
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+}
+
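+/*
+ * struct kbase_debug_copy_buffer - One destination/source pair for a debug copy
+ * @size:            Size in bytes of the user destination buffer
+ * @pages:           Pinned user pages backing the destination buffer
+ * @nr_pages:        Number of pages in @pages
+ * @offset:          Offset of the destination buffer within its first page
+ * @gpu_alloc:       Physical allocation of the external resource to copy from
+ * @extres_pages:    Pinned pages of the external resource (user buffer imports)
+ * @nr_extres_pages: Number of pages in @extres_pages
+ */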
+struct kbase_debug_copy_buffer {
+ size_t size;
+ struct page **pages;
+ int nr_pages;
+ size_t offset;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+
+ struct page **extres_pages;
+ int nr_extres_pages;
+};
+
+static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
+{
+ struct page **pages = buffer->extres_pages;
+ int nr_pages = buffer->nr_extres_pages;
+
+ if (pages) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *pg = pages[i];
+
+ if (pg)
+ put_page(pg);
+ }
+ kfree(pages);
+ }
+}
+
+static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers =
+ (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
+ unsigned int i;
+ unsigned int nr = katom->nr_extres;
+
+ if (!buffers)
+ return;
+
+ kbase_gpu_vm_lock(katom->kctx);
+ for (i = 0; i < nr; i++) {
+ int p;
+ struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
+
+ if (!buffers[i].pages)
+ break;
+ for (p = 0; p < buffers[i].nr_pages; p++) {
+ struct page *pg = buffers[i].pages[p];
+
+ if (pg)
+ put_page(pg);
+ }
+ kfree(buffers[i].pages);
+ if (gpu_alloc) {
+ switch (gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ free_user_buffer(&buffers[i]);
+ break;
+ }
+ default:
+ /* Nothing to be done. */
+ break;
+ }
+ kbase_mem_phy_alloc_put(gpu_alloc);
+ }
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+ kfree(buffers);
+
+ katom->jc = 0;
+}
+
+static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers;
+ struct base_jd_debug_copy_buffer *user_buffers = NULL;
+ unsigned int i;
+ unsigned int nr = katom->nr_extres;
+ int ret = 0;
+ void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
+
+ if (!user_structs)
+ return -EINVAL;
+
+ buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
+ if (!buffers) {
+ ret = -ENOMEM;
+ katom->jc = 0;
+ goto out_cleanup;
+ }
+ katom->jc = (u64)(uintptr_t)buffers;
+
+ user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
+
+ if (!user_buffers) {
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ ret = copy_from_user(user_buffers, user_structs,
+ sizeof(*user_buffers)*nr);
+ if (ret) {
+ ret = -EFAULT;
+ goto out_cleanup;
+ }
+
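+ /* For each destination buffer: pin the user pages backing it and take
+ * a reference on the external resource that will be copied from.
+ */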
+ for (i = 0; i < nr; i++) {
+ u64 addr = user_buffers[i].address;
+ u64 page_addr = addr & PAGE_MASK;
+ u64 end_page_addr = addr + user_buffers[i].size - 1;
+ u64 last_page_addr = end_page_addr & PAGE_MASK;
+ int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
+ int pinned_pages;
+ struct kbase_va_region *reg;
+ struct base_external_resource user_extres;
+
+ if (!addr)
+ continue;
+
+ buffers[i].nr_pages = nr_pages;
+ buffers[i].offset = addr & ~PAGE_MASK;
+ if (buffers[i].offset >= PAGE_SIZE) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+ buffers[i].size = user_buffers[i].size;
+
+ buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buffers[i].pages) {
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ pinned_pages = get_user_pages_fast(page_addr,
+ nr_pages,
+ 1, /* Write */
+ buffers[i].pages);
+ if (pinned_pages < 0) {
+ ret = pinned_pages;
+ goto out_cleanup;
+ }
+ if (pinned_pages != nr_pages) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+
+ user_extres = user_buffers[i].extres;
+ if (user_extres.ext_resource == 0ULL) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+
+ kbase_gpu_vm_lock(katom->kctx);
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ katom->kctx, user_extres.ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+ if (NULL == reg || NULL == reg->gpu_alloc ||
+ (reg->flags & KBASE_REG_FREE)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ buffers[i].nr_extres_pages = reg->nr_pages;
+
+ if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
+ dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");
+
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+ unsigned long nr_pages =
+ alloc->imported.user_buf.nr_pages;
+
+ if (alloc->imported.user_buf.mm != current->mm) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ buffers[i].extres_pages = kcalloc(nr_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!buffers[i].extres_pages) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ret = get_user_pages_fast(
+ alloc->imported.user_buf.address,
+ nr_pages, 0,
+ buffers[i].extres_pages);
+ if (ret != nr_pages)
+ goto out_unlock;
+ ret = 0;
+ break;
+ }
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ {
+ dev_warn(katom->kctx->kbdev->dev,
+ "UMP is not supported for debug_copy jobs\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ default:
+ /* Nothing to be done. */
+ break;
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+ }
+ kfree(user_buffers);
+
+ return ret;
+
+out_unlock:
+ kbase_gpu_vm_unlock(katom->kctx);
+
+out_cleanup:
+ kfree(buffers);
+ kfree(user_buffers);
+
+ /* Frees the allocated kbase_debug_copy_buffer array, including its
+ * members, and sets katom->jc to 0 */
+ kbase_debug_copy_finish(katom);
+ return ret;
+}
+
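+/*
+ * Copy one external resource page into the pinned destination pages. Because
+ * the destination buffer may start at a non-zero @offset, the copy can
+ * straddle two destination pages; @target_page_nr and @to_copy are updated so
+ * the caller can continue with the next source page.
+ */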
+static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+ void *extres_page, struct page **pages, unsigned int nr_pages,
+ unsigned int *target_page_nr, size_t offset, size_t *to_copy)
+{
+ void *target_page = kmap(pages[*target_page_nr]);
+ size_t chunk = PAGE_SIZE-offset;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ if (!target_page) {
+ *target_page_nr += 1;
+ dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+ return;
+ }
+
+ chunk = min(chunk, *to_copy);
+
+ memcpy(target_page + offset, extres_page, chunk);
+ *to_copy -= chunk;
+
+ kunmap(pages[*target_page_nr]);
+
+ *target_page_nr += 1;
+ if (*target_page_nr >= nr_pages)
+ return;
+
+ target_page = kmap(pages[*target_page_nr]);
+ if (!target_page) {
+ *target_page_nr += 1;
+ dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(target_page);
+
+ chunk = min(offset, *to_copy);
+ memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
+ *to_copy -= chunk;
+
+ kunmap(pages[*target_page_nr]);
+}
+
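+/*
+ * Copy the external resource described by @buf_data into its pinned
+ * destination pages. User-buffer imports are read through kmap() of the
+ * pinned pages, dma-buf imports through dma_buf_kmap() bracketed by CPU
+ * access calls; other allocation types are rejected with -EINVAL.
+ */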
+static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+ struct kbase_debug_copy_buffer *buf_data)
+{
+ unsigned int i;
+ unsigned int target_page_nr = 0;
+ struct page **pages = buf_data->pages;
+ u64 offset = buf_data->offset;
+ size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
+ size_t to_copy = min(extres_size, buf_data->size);
+ struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
+ int ret = 0;
+
+ KBASE_DEBUG_ASSERT(pages != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+ if (!gpu_alloc) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ switch (gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ for (i = 0; i < buf_data->nr_extres_pages; i++) {
+ struct page *pg = buf_data->extres_pages[i];
+ void *extres_page = kmap(pg);
+
+ if (extres_page)
+ kbase_mem_copy_from_extres_page(kctx,
+ extres_page, pages,
+ buf_data->nr_pages,
+ &target_page_nr,
+ offset, &to_copy);
+
+ kunmap(pg);
+ if (target_page_nr >= buf_data->nr_pages)
+ break;
+ }
+ break;
+ }
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+ struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
+
+ KBASE_DEBUG_ASSERT(dma_buf != NULL);
+ KBASE_DEBUG_ASSERT(dma_buf->size ==
+ buf_data->nr_extres_pages * PAGE_SIZE);
+
+ ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+ 0, buf_data->nr_extres_pages*PAGE_SIZE,
+#endif
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+
+ for (i = 0; i < buf_data->nr_extres_pages; i++) {
+
+ void *extres_page = dma_buf_kmap(dma_buf, i);
+
+ if (extres_page)
+ kbase_mem_copy_from_extres_page(kctx,
+ extres_page, pages,
+ buf_data->nr_pages,
+ &target_page_nr,
+ offset, &to_copy);
+
+ dma_buf_kunmap(dma_buf, i, extres_page);
+ if (target_page_nr >= buf_data->nr_pages)
+ break;
+ }
+ dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+ 0, buf_data->nr_extres_pages*PAGE_SIZE,
+#endif
+ DMA_FROM_DEVICE);
+ break;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ }
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return ret;
+
+}
+
+static int kbase_debug_copy(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers =
+ (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
+ unsigned int i;
+
+ for (i = 0; i < katom->nr_extres; i++) {
+ int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
+
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
+{
+ __user void *data = (__user void *)(uintptr_t) katom->jc;
+ struct base_jit_alloc_info *info;
+ struct kbase_context *kctx = katom->kctx;
+ int ret;
+
+ /* Fail the job if there is no info structure */
+ if (!data) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (copy_from_user(info, data, sizeof(*info)) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* If the ID is zero then fail the job */
+ if (info->id == 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Sanity check that the PA fits within the VA */
+ if (info->va_pages < info->commit_pages) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Ensure the GPU address is correctly aligned */
+ if ((info->gpu_alloc_addr & 0x7) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Replace the user pointer with our kernel allocated info structure */
+ katom->jc = (u64)(uintptr_t) info;
+ katom->jit_blocked = false;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+ list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+ /*
+ * Note:
+ * The provided info->gpu_alloc_addr isn't validated here as
+ * userland can cache allocations which means that even
+ * though the region is valid it doesn't represent the
+ * same thing it used to.
+ *
+ * Complete validation of va_pages, commit_pages and extent
+ * isn't done here as it will be done during the call to
+ * kbase_mem_alloc.
+ */
+ return 0;
+
+free_info:
+ kfree(info);
+fail:
+ katom->jc = 0;
+ return ret;
+}
+
+static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom)
+{
+ if (WARN_ON(katom->core_req != BASE_JD_REQ_SOFT_JIT_FREE))
+ return 0;
+
+ return (u8) katom->jc;
+}
+
+static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct base_jit_alloc_info *info;
+ struct kbase_va_region *reg;
+ struct kbase_vmap_struct mapping;
+ u64 *ptr, new_addr;
+
+ if (katom->jit_blocked) {
+ list_del(&katom->queue);
+ katom->jit_blocked = false;
+ }
+
+ info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
+
+ /* The JIT ID is still in use so fail the allocation */
+ if (kctx->jit_alloc[info->id]) {
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
+
+ /* Create a JIT allocation */
+ reg = kbase_jit_allocate(kctx, info);
+ if (!reg) {
+ struct kbase_jd_atom *jit_atom;
+ bool can_block = false;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ jit_atom = list_first_entry(&kctx->jit_atoms_head,
+ struct kbase_jd_atom, jit_node);
+
+ list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
+ if (jit_atom == katom)
+ break;
+ if (jit_atom->core_req == BASE_JD_REQ_SOFT_JIT_FREE) {
+ u8 free_id = kbase_jit_free_get_id(jit_atom);
+
+ if (free_id && kctx->jit_alloc[free_id]) {
+ /* A JIT free which is active and
+ * submitted before this atom
+ */
+ can_block = true;
+ break;
+ }
+ }
+ }
+
+ if (!can_block) {
+ /* Mark the allocation so we know it's in use even if
+ * the allocation itself fails.
+ */
+ kctx->jit_alloc[info->id] =
+ (struct kbase_va_region *) -1;
+
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
+
+ /* There are pending frees for an active allocation
+ * so we should wait to see whether they free the memory.
+ * Add to the beginning of the list to ensure that the atom is
+ * processed only once in kbase_jit_free_finish
+ */
+ list_add(&katom->queue, &kctx->jit_pending_alloc);
+ katom->jit_blocked = true;
+
+ return 1;
+ }
+
+ /*
+ * Write the address of the JIT allocation to the user provided
+ * GPU allocation.
+ */
+ ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+ &mapping);
+ if (!ptr) {
+ /*
+ * Leave the allocation "live" as the JIT free job will be
+ * submitted anyway.
+ */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return 0;
+ }
+
+ new_addr = reg->start_pfn << PAGE_SHIFT;
+ *ptr = new_addr;
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
+ katom, info->gpu_alloc_addr, new_addr);
+ kbase_vunmap(kctx, &mapping);
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ /*
+ * Bind it to the user provided ID. Do this last so we can check for
+ * the JIT free racing this JIT alloc job.
+ */
+ kctx->jit_alloc[info->id] = reg;
+
+ return 0;
+}
+
+static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
+{
+ struct base_jit_alloc_info *info;
+
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ /* Remove atom from jit_atoms_head list */
+ list_del(&katom->jit_node);
+
+ if (katom->jit_blocked) {
+ list_del(&katom->queue);
+ katom->jit_blocked = false;
+ }
+
+ info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
+ /* Free the info structure */
+ kfree(info);
+}
+
+static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+ list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+ return 0;
+}
+
+static void kbase_jit_free_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ u8 id = kbase_jit_free_get_id(katom);
+
+ /*
+ * If the ID is zero or it is not in use yet then fail the job.
+ */
+ if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return;
+ }
+
+ /*
+ * If the ID is valid but the allocation request failed, still succeed
+ * this soft job but don't try to free the allocation.
+ */
+ if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
+ kbase_jit_free(kctx, kctx->jit_alloc[id]);
+
+ kctx->jit_alloc[id] = NULL;
+}
+
+static void kbasep_jit_free_finish_worker(struct work_struct *work)
+{
+ struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+ int resched;
+
+ mutex_lock(&kctx->jctx.lock);
+ kbase_finish_soft_job(katom);
+ resched = jd_done_nolock(katom, NULL);
+ mutex_unlock(&kctx->jctx.lock);
+
+ if (resched)
+ kbase_js_sched_all(kctx->kbdev);
+}
+
+static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
+{
+ struct list_head *i, *tmp;
+ struct kbase_context *kctx = katom->kctx;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+ /* Remove this atom from the kctx->jit_atoms_head list */
+ list_del(&katom->jit_node);
+
+ list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) {
+ struct kbase_jd_atom *pending_atom = list_entry(i,
+ struct kbase_jd_atom, queue);
+ if (kbase_jit_allocate_process(pending_atom) == 0) {
+ /* Atom has completed */
+ INIT_WORK(&pending_atom->work,
+ kbasep_jit_free_finish_worker);
+ queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
+ }
+ }
+}
+
+static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
+{
+ __user struct base_external_resource_list *user_ext_res;
+ struct base_external_resource_list *ext_res;
+ u64 count = 0;
+ size_t copy_size;
+ int ret;
+
+ user_ext_res = (__user struct base_external_resource_list *)
+ (uintptr_t) katom->jc;
+
+ /* Fail the job if there is no info structure */
+ if (!user_ext_res) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Is the number of external resources in range? */
+ if (!count || count > BASE_EXT_RES_COUNT_MAX) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ copy_size = sizeof(*ext_res);
+ copy_size += sizeof(struct base_external_resource) * (count - 1);
+ ext_res = kzalloc(copy_size, GFP_KERNEL);
+ if (!ext_res) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /*
+ * Overwrite the count with the first value read, in case it was changed
+ * after the fact.
+ */
+ ext_res->count = count;
+
+ /*
+ * Replace the user pointer with our kernel allocated
+ * ext_res structure.
+ */
+ katom->jc = (u64)(uintptr_t) ext_res;
+
+ return 0;
+
+free_info:
+ kfree(ext_res);
+fail:
+ return ret;
+}
+
+static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
+{
+ struct base_external_resource_list *ext_res;
+ int i;
+ bool failed = false;
+
+ ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
+ if (!ext_res)
+ goto failed_jc;
+
+ kbase_gpu_vm_lock(katom->kctx);
+
+ for (i = 0; i < ext_res->count; i++) {
+ u64 gpu_addr;
+
+ gpu_addr = ext_res->ext_res[i].ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+ if (map) {
+ if (!kbase_sticky_resource_acquire(katom->kctx,
+ gpu_addr))
+ goto failed_loop;
+ } else
+ if (!kbase_sticky_resource_release(katom->kctx, NULL,
+ gpu_addr))
+ failed = true;
+ }
+
+ /*
+ * In the case of unmap we continue unmapping other resources in the
+ * case of failure but will always report failure if _any_ unmap
+ * request fails.
+ */
+ if (failed)
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ else
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ kbase_gpu_vm_unlock(katom->kctx);
+
+ return;
+
+failed_loop:
+ while (--i >= 0) {
+ u64 gpu_addr;
+
+ gpu_addr = ext_res->ext_res[i].ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+
+ kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
+ }
+
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_vm_unlock(katom->kctx);
+
+failed_jc:
+ return;
+}
+
+static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
+{
+ struct base_external_resource_list *ext_res;
+
+ ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
+ /* Free the info structure */
+ kfree(ext_res);
+}
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ return kbase_dump_cpu_gpu_time(katom);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ katom->event_code = kbase_sync_fence_out_trigger(katom,
+ katom->event_code == BASE_JD_EVENT_DONE ?
+ 0 : -EFAULT);
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ {
+ int ret = kbase_sync_fence_in_wait(katom);
+
+ if (ret == 1) {
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ kbasep_add_waiting_with_timeout(katom);
+#else
+ kbasep_add_waiting_soft_job(katom);
+#endif
+ }
+ return ret;
+ }
+#endif
+
+ case BASE_JD_REQ_SOFT_REPLAY:
+ return kbase_replay_process(katom);
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ return kbasep_soft_event_wait(katom);
+ case BASE_JD_REQ_SOFT_EVENT_SET:
+ kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
+ break;
+ case BASE_JD_REQ_SOFT_EVENT_RESET:
+ kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
+ break;
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ {
+ int res = kbase_debug_copy(katom);
+
+ if (res)
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ break;
+ }
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ return kbase_jit_allocate_process(katom);
+ case BASE_JD_REQ_SOFT_JIT_FREE:
+ kbase_jit_free_process(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ kbase_ext_res_process(katom, true);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ kbase_ext_res_process(katom, false);
+ break;
+ }
+
+ /* Atom is complete */
+ return 0;
+}
+
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ kbase_sync_fence_in_cancel_wait(katom);
+ break;
+#endif
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ kbasep_soft_event_cancel_job(katom);
+ break;
+ default:
+ /* This soft-job doesn't support cancellation! */
+ KBASE_DEBUG_ASSERT(0);
+ }
+}
+
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ {
+ if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
+ return -EINVAL;
+ }
+ break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ {
+ struct base_fence fence;
+ int fd;
+
+ if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+ return -EINVAL;
+
+ fd = kbase_sync_fence_out_create(katom,
+ fence.basep.stream_fd);
+ if (fd < 0)
+ return -EINVAL;
+
+ fence.basep.fd = fd;
+ if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
+ kbase_sync_fence_out_remove(katom);
+ kbase_sync_fence_close_fd(fd);
+ fence.basep.fd = -EINVAL;
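+/*
+ * Timer callback for the per-context soft job timeout. Event-wait atoms that
+ * have exceeded the timeout are cancelled via job_done_wq, and (with
+ * CONFIG_MALI_FENCE_DEBUG) long-running fence waits get a debug dump. The
+ * timer is re-armed while any waiting job is still within the timeout.
+ */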
+ return -EINVAL;
+ }
+ }
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ {
+ struct base_fence fence;
+ int ret;
+
+ if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+ return -EINVAL;
+
+ /* Get a reference to the fence object */
+ ret = kbase_sync_fence_in_from_fd(katom,
+ fence.basep.fd);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /*
+ * Set KCTX_NO_IMPLICIT_SYNC in the context the first
+ * time a soft fence wait job is observed. This
+ * prevents the implicit dma-buf fences from
+ * conflicting with the Android native sync fences.
+ */
+ if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
+ kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
+#endif /* CONFIG_MALI_DMA_FENCE */
+ }
+ break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ return kbase_jit_allocate_prepare(katom);
+ case BASE_JD_REQ_SOFT_REPLAY:
+ break;
+ case BASE_JD_REQ_SOFT_JIT_FREE:
+ return kbase_jit_free_prepare(katom);
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ case BASE_JD_REQ_SOFT_EVENT_SET:
+ case BASE_JD_REQ_SOFT_EVENT_RESET:
+ if (katom->jc == 0)
+ return -EINVAL;
+ break;
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ return kbase_debug_copy_prepare(katom);
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ return kbase_ext_res_prepare(katom);
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ return kbase_ext_res_prepare(katom);
+ default:
+ /* Unsupported soft-job */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void kbase_finish_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ /* Nothing to do */
+ break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ /* If fence has not yet been signaled, do it now */
+ kbase_sync_fence_out_trigger(katom, katom->event_code ==
+ BASE_JD_EVENT_DONE ? 0 : -EFAULT);
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ /* Release katom's reference to fence object */
+ kbase_sync_fence_in_remove(katom);
+ break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ kbase_debug_copy_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ kbase_jit_allocate_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ kbase_ext_res_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ kbase_ext_res_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_JIT_FREE:
+ kbase_jit_free_finish(katom);
+ break;
+ }
+}
+
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
+{
+ LIST_HEAD(local_suspended_soft_jobs);
+ struct kbase_jd_atom *tmp_iter;
+ struct kbase_jd_atom *katom_iter;
+ struct kbasep_js_device_data *js_devdata;
+ bool resched = false;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ js_devdata = &kbdev->js_data;
+
+ /* Move out the entire list */
+ mutex_lock(&js_devdata->runpool_mutex);
+ list_splice_init(&js_devdata->suspended_soft_jobs_list,
+ &local_suspended_soft_jobs);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /*
+ * Each atom must be detached from the list and run separately -
+ * it could be re-added to the old list, but this is unlikely
+ */
+ list_for_each_entry_safe(katom_iter, tmp_iter,
+ &local_suspended_soft_jobs, dep_item[1]) {
+ struct kbase_context *kctx = katom_iter->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+
+ /* Remove from the global list */
+ list_del(&katom_iter->dep_item[1]);
+ /* Remove from the context's list of waiting soft jobs */
+ kbasep_remove_waiting_soft_job(katom_iter);
+
+ if (kbase_process_soft_job(katom_iter) == 0) {
+ kbase_finish_soft_job(katom_iter);
+ resched |= jd_done_nolock(katom_iter, NULL);
+ } else {
+ KBASE_DEBUG_ASSERT((katom_iter->core_req &
+ BASE_JD_REQ_SOFT_JOB_TYPE)
+ != BASE_JD_REQ_SOFT_REPLAY);
+ }
+
+ mutex_unlock(&kctx->jctx.lock);
+ }
+
+ if (resched)
+ kbase_js_sched_all(kbdev);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_strings.c b/drivers/gpu/arm_gpu/mali_kbase_strings.c
new file mode 100644
index 000000000000..c98762cec244
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_strings.c
@@ -0,0 +1,23 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include "mali_kbase_strings.h"
+
+#define KBASE_DRV_NAME "mali"
+#define KBASE_TIMELINE_NAME KBASE_DRV_NAME ".timeline"
+
+const char kbase_drv_name[] = KBASE_DRV_NAME;
+const char kbase_timeline_name[] = KBASE_TIMELINE_NAME;
diff --git a/drivers/gpu/arm_gpu/mali_kbase_strings.h b/drivers/gpu/arm_gpu/mali_kbase_strings.h
new file mode 100644
index 000000000000..41b8fdbec6a4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_strings.h
@@ -0,0 +1,19 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+extern const char kbase_drv_name[];
+extern const char kbase_timeline_name[];
diff --git a/drivers/gpu/arm_gpu/mali_kbase_sync.h b/drivers/gpu/arm_gpu/mali_kbase_sync.h
new file mode 100644
index 000000000000..de72147d67ad
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_sync.h
@@ -0,0 +1,203 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_sync.h
+ *
+ * This file contains our internal "API" for explicit fences.
+ * It hides the implementation details of the actual explicit fence mechanism
+ * used (Android fences or sync file with DMA fences).
+ */
+
+#ifndef MALI_KBASE_SYNC_H
+#define MALI_KBASE_SYNC_H
+
+#include <linux/syscalls.h>
+#ifdef CONFIG_SYNC
+#include <sync.h>
+#endif
+#ifdef CONFIG_SYNC_FILE
+#include "mali_kbase_fence_defs.h"
+#include <linux/sync_file.h>
+#endif
+
+#include "mali_kbase.h"
+
+/**
+ * struct kbase_sync_fence_info - Information about a fence
+ * @fence: Pointer to fence (type is void*, as underlying struct can differ)
+ * @name: The name given to this fence when it was created
+ * @status: < 0 means error, 0 means active, 1 means signaled
+ *
+ * Use kbase_sync_fence_in_info_get() or kbase_sync_fence_out_info_get()
+ * to get the information.
+ */
+struct kbase_sync_fence_info {
+ void *fence;
+ char name[32];
+ int status;
+};
+
+/**
+ * kbase_sync_fence_stream_create() - Create a stream object
+ * @name: Name of stream (only used to ease debugging/visualization)
+ * @out_fd: A file descriptor representing the created stream object
+ *
+ * Can map down to a timeline implementation in some implementations.
+ * Exposed as a file descriptor.
+ * Life-time controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd);
+
+/**
+ * kbase_sync_fence_out_create() - Create an explicit output fence for the specified atom
+ * @katom: Atom to assign the new explicit fence to
+ * @stream_fd: File descriptor for stream object to create fence on
+ *
+ * return: Valid file descriptor to fence or < 0 on error
+ */
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd);
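+
+/*
+ * A minimal usage sketch (illustrative only; error handling is omitted and
+ * the surrounding katom setup is assumed). A stream is typically created
+ * once per context and output fences are then created against it:
+ *
+ *	int stream_fd, fence_fd;
+ *
+ *	if (kbase_sync_fence_stream_create("mali-timeline", &stream_fd) == 0)
+ *		fence_fd = kbase_sync_fence_out_create(katom, stream_fd);
+ */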
+
+/**
+ * kbase_sync_fence_in_from_fd() - Assign an existing fence to the specified atom
+ * @katom: Atom to assign the existing explicit fence to
+ * @fd: File descriptor to an existing fence
+ *
+ * Assigns an explicit input fence to atom.
+ * This can later be waited for by calling kbase_sync_fence_in_wait()
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd);
+
+/**
+ * kbase_sync_fence_validate() - Validate a fd to be a valid fence
+ * @fd: File descriptor to check
+ *
+ * This function is only usable to catch unintentional user errors early,
+ * it does not stop malicious code changing the fd after this function returns.
+ *
+ * return: 0 if fd is for a valid fence, < 0 if invalid
+ */
+int kbase_sync_fence_validate(int fd);
+
+/**
+ * kbase_sync_fence_out_trigger() - Signal explicit output fence attached on katom
+ * @katom: Atom with an explicit fence to signal
+ * @result: < 0 means signal with error, >= 0 indicates success
+ *
+ * Signal output fence attached on katom and remove the fence from the atom.
+ *
+ * return: The "next" event code for atom, typically JOB_CANCELLED or EVENT_DONE
+ */
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result);
+
+/**
+ * kbase_sync_fence_in_wait() - Wait for explicit input fence to be signaled
+ * @katom: Atom with explicit fence to wait for
+ *
+ * If the fence is already signaled, then 0 is returned, and the caller must
+ * continue processing of the katom.
+ *
+ * If the fence isn't already signaled, then this kbase_sync framework will
+ * take responsibility to continue the processing once the fence is signaled.
+ *
+ * return: 0 if already signaled, otherwise 1
+ */
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_in_cancel_wait() - Cancel explicit input fence waits
+ * @katom: Atom to cancel wait for
+ *
+ * This function is fully responsible for continuing processing of this atom
+ * (remove_waiting_soft_job + finish_soft_job + jd_done + js_sched_all)
+ */
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_in_remove() - Remove the input fence from the katom
+ * @katom: Atom to remove explicit input fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_out_remove() - Remove the output fence from the katom
+ * @katom: Atom to remove explicit output fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_close_fd() - Close a file descriptor representing a fence
+ * @fd: File descriptor to close
+ */
+static inline void kbase_sync_fence_close_fd(int fd)
+{
+ sys_close(fd);
+}
+
+/**
+ * kbase_sync_fence_in_info_get() - Retrieves information about input fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info);
+
+/**
+ * kbase_sync_fence_out_info_get() - Retrieves information about output fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info);
+
+/**
+ * kbase_sync_status_string() - Get string matching @status
+ * @status: Value of fence status.
+ *
+ * return: Pointer to string describing @status.
+ */
+const char *kbase_sync_status_string(int status);
+
+/*
+ * Internal worker used to continue processing of atom.
+ */
+void kbase_sync_fence_wait_worker(struct work_struct *data);
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+/**
+ * kbase_sync_fence_in_dump() - Trigger a debug dump of the atom's input fence state
+ * @katom: Atom to trigger fence debug dump for
+ */
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom);
+#endif
+
+#endif /* MALI_KBASE_SYNC_H */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_sync_android.c b/drivers/gpu/arm_gpu/mali_kbase_sync_android.c
new file mode 100644
index 000000000000..d7349dcae69a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_sync_android.c
@@ -0,0 +1,537 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Code for supporting explicit Android fences (CONFIG_SYNC)
+ * Known to be good for kernels 4.5 and earlier.
+ * Replaced with CONFIG_SYNC_FILE for 4.9 and later kernels
+ * (see mali_kbase_sync_file.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include "sync.h"
+#include <mali_kbase.h>
+#include <mali_kbase_sync.h>
+
+struct mali_sync_timeline {
+ struct sync_timeline timeline;
+ atomic_t counter;
+ atomic_t signaled;
+};
+
+struct mali_sync_pt {
+ struct sync_pt pt;
+ int order;
+ int result;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+/* For backwards compatibility with kernels before 3.17. After 3.17
+ * sync_pt_parent is included in the kernel. */
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+ return pt->parent;
+}
+#endif
+
+static struct mali_sync_timeline *to_mali_sync_timeline(
+ struct sync_timeline *timeline)
+{
+ return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, pt);
+}
+
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_pt *new_mpt;
+ struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt),
+ sizeof(struct mali_sync_pt));
+
+ if (!new_pt)
+ return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+ new_mpt->order = mpt->order;
+ new_mpt->result = mpt->result;
+
+ return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+ sync_pt_parent(pt));
+ int result = mpt->result;
+
+ int diff = atomic_read(&mtl->signaled) - mpt->order;
+
+ if (diff >= 0)
+ return (result < 0) ? result : 1;
+
+ return 0;
+}
+
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+ struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+ int diff = ma->order - mb->order;
+
+ if (diff == 0)
+ return 0;
+
+ return (diff < 0) ? -1 : 1;
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str,
+ int size)
+{
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);
+
+ snprintf(str, size, "%d", atomic_read(&mtl->signaled));
+}
+
+static void pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
+ snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
+}
+
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .timeline_value_str = timeline_value_str,
+ .pt_value_str = pt_value_str,
+};
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+static struct sync_timeline *mali_sync_timeline_alloc(const char *name)
+{
+ struct sync_timeline *tl;
+ struct mali_sync_timeline *mtl;
+
+ tl = sync_timeline_create(&mali_timeline_ops,
+ sizeof(struct mali_sync_timeline), name);
+ if (!tl)
+ return NULL;
+
+ /* Set the counter in our private struct */
+ mtl = to_mali_sync_timeline(tl);
+ atomic_set(&mtl->counter, 0);
+ atomic_set(&mtl->signaled, 0);
+
+ return tl;
+}
+
+static int kbase_stream_close(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *tl;
+
+ tl = (struct sync_timeline *)file->private_data;
+ sync_timeline_destroy(tl);
+ return 0;
+}
+
+static const struct file_operations stream_fops = {
+ .owner = THIS_MODULE,
+ .release = kbase_stream_close,
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+ struct sync_timeline *tl;
+
+ if (!out_fd)
+ return -EINVAL;
+
+ tl = mali_sync_timeline_alloc(name);
+ if (!tl)
+ return -EINVAL;
+
+ *out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY|O_CLOEXEC);
+
+ if (*out_fd < 0) {
+ sync_timeline_destroy(tl);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by kbase_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ */
+static struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent)
+{
+ struct sync_pt *pt = sync_pt_create(parent,
+ sizeof(struct mali_sync_pt));
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+ struct mali_sync_pt *mpt;
+
+ if (!pt)
+ return NULL;
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->order = atomic_inc_return(&mtl->counter);
+ mpt->result = 0;
+
+ return pt;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int tl_fd)
+{
+ struct sync_timeline *tl;
+ struct sync_pt *pt;
+ struct sync_fence *fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ struct files_struct *files;
+ struct fdtable *fdt;
+#endif
+ int fd;
+ struct file *tl_file;
+
+ tl_file = fget(tl_fd);
+ if (tl_file == NULL)
+ return -EBADF;
+
+ if (tl_file->f_op != &stream_fops) {
+ fd = -EBADF;
+ goto out;
+ }
+
+ tl = tl_file->private_data;
+
+ pt = kbase_sync_pt_alloc(tl);
+ if (!pt) {
+ fd = -EFAULT;
+ goto out;
+ }
+
+ fence = sync_fence_create("mali_fence", pt);
+ if (!fence) {
+ sync_pt_free(pt);
+ fd = -EFAULT;
+ goto out;
+ }
+
+ /* from here the fence owns the sync_pt */
+
+ /* create a fd representing the fence */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ sync_fence_put(fence);
+ goto out;
+ }
+#else
+ fd = get_unused_fd();
+ if (fd < 0) {
+ sync_fence_put(fence);
+ goto out;
+ }
+
+ files = current->files;
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ __set_close_on_exec(fd, fdt);
+#else
+ FD_SET(fd, fdt->close_on_exec);
+#endif
+ spin_unlock(&files->file_lock);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */
+
+ /* bind fence to the new fd */
+ sync_fence_install(fence, fd);
+
+ katom->fence = sync_fence_fdget(fd);
+ if (katom->fence == NULL) {
+ /* The only way the fence can be NULL is if userspace closed it
+ * for us, so we don't need to clean it up */
+ fd = -EINVAL;
+ goto out;
+ }
+
+out:
+ fput(tl_file);
+
+ return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+ katom->fence = sync_fence_fdget(fd);
+ return katom->fence ? 0 : -ENOENT;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+ struct sync_fence *fence;
+
+ fence = sync_fence_fdget(fd);
+ if (!fence)
+ return -EINVAL;
+
+ sync_fence_put(fence);
+ return 0;
+}
+
+/* Returns true if the specified timeline is allocated by Mali */
+static int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+ return timeline->ops == &mali_timeline_ops;
+}
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ *
+ * If they are signaled in the wrong order then a message will be printed in
+ * debug builds, and attempts to signal out-of-order sync_pts will be ignored.
+ *
+ * result can be negative to indicate error, any other value is interpreted as
+ * success.
+ */
+static void kbase_sync_signal_pt(struct sync_pt *pt, int result)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+ sync_pt_parent(pt));
+ int signaled;
+ int diff;
+
+ mpt->result = result;
+
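+ /* Advance the timeline's signaled counter to this point's order using
+ * a cmpxchg loop, so a concurrent signal cannot move it backwards.
+ */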
+ do {
+ signaled = atomic_read(&mtl->signaled);
+
+ diff = signaled - mpt->order;
+
+ if (diff > 0) {
+ /* The timeline is already at or ahead of this point.
+ * This should not happen unless userspace has been
+ * signaling fences out of order, so warn but don't
+ * violate the sync_pt API.
+ * The warning is only in debug builds to prevent
+ * a malicious user being able to spam dmesg.
+ */
+#ifdef CONFIG_MALI_DEBUG
+ pr_err("Fences were triggered in a different order to allocation!");
+#endif /* CONFIG_MALI_DEBUG */
+ return;
+ }
+ } while (atomic_cmpxchg(&mtl->signaled,
+ signaled, mpt->order) != signaled);
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+ struct sync_pt *pt;
+ struct sync_timeline *timeline;
+
+ if (!katom->fence)
+ return BASE_JD_EVENT_JOB_CANCELLED;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ if (!list_is_singular(&katom->fence->pt_list_head)) {
+#else
+ if (katom->fence->num_fences != 1) {
+#endif
+ /* Not exactly one item in the list - so it didn't (directly)
+ * come from us */
+ return BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ pt = list_first_entry(&katom->fence->pt_list_head,
+ struct sync_pt, pt_list);
+#else
+ pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
+#endif
+ timeline = sync_pt_parent(pt);
+
+ if (!kbase_sync_timeline_is_ours(timeline)) {
+ /* Fence has a sync_pt which isn't ours! */
+ return BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+ kbase_sync_signal_pt(pt, result);
+
+ sync_timeline_signal(timeline);
+
+ kbase_sync_fence_out_remove(katom);
+
+ return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+static inline int kbase_fence_get_status(struct sync_fence *fence)
+{
+ if (!fence)
+ return -ENOENT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ return fence->status;
+#else
+ return atomic_read(&fence->status);
+#endif
+}
+
+static void kbase_fence_wait_callback(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ struct kbase_jd_atom *katom = container_of(waiter,
+ struct kbase_jd_atom, sync_waiter);
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Propagate the fence status to the atom.
+ * If negative then cancel this atom and its dependencies.
+ */
+ if (kbase_fence_get_status(fence) < 0)
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ /* To prevent a potential deadlock we schedule the work onto the
+ * job_done_wq workqueue
+ *
+ * The issue is that we may signal the timeline while holding
+ * kctx->jctx.lock and the callbacks are run synchronously from
+ * sync_timeline_signal. So we simply defer the work.
+ */
+
+ INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+ int ret;
+
+ sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
+
+ ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
+
+ if (ret == 1) {
+ /* Already signaled */
+ return 0;
+ }
+
+ if (ret < 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ /* We should cause the dependent jobs in the bag to be failed,
+ * to do this we schedule the work queue to complete this job */
+ INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+ queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+ }
+
+ return 1;
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+ if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
+ /* The wait wasn't cancelled - leave the cleanup for
+ * kbase_fence_wait_callback */
+ return;
+ }
+
+ /* Wait was cancelled - zap the atoms */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ kbasep_remove_waiting_soft_job(katom);
+ kbase_finish_soft_job(katom);
+
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+ if (katom->fence) {
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ }
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+ if (katom->fence) {
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ }
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info)
+{
+ if (!katom->fence)
+ return -ENOENT;
+
+ info->fence = katom->fence;
+ info->status = kbase_fence_get_status(katom->fence);
+ strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+ return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info)
+{
+ if (!katom->fence)
+ return -ENOENT;
+
+ info->fence = katom->fence;
+ info->status = kbase_fence_get_status(katom->fence);
+ strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+ return 0;
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+ /* Dump out the full state of all the Android sync fences.
+ * The function sync_dump() isn't exported to modules, so force
+ * sync_fence_wait() to time out to trigger sync_dump().
+ */
+ if (katom->fence)
+ sync_fence_wait(katom->fence, 1);
+}
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_sync_common.c b/drivers/gpu/arm_gpu/mali_kbase_sync_common.c
new file mode 100644
index 000000000000..457def296684
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_sync_common.c
@@ -0,0 +1,43 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * @file mali_kbase_sync_common.c
+ *
+ * Common code for our explicit fence functionality
+ */
+
+#include <linux/workqueue.h>
+#include "mali_kbase.h"
+
+void kbase_sync_fence_wait_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom;
+
+ katom = container_of(data, struct kbase_jd_atom, work);
+ kbase_soft_event_wait_callback(katom);
+}
+
+const char *kbase_sync_status_string(int status)
+{
+ if (status == 0)
+ return "signaled";
+ else if (status > 0)
+ return "active";
+ else
+ return "error";
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_sync_file.c b/drivers/gpu/arm_gpu/mali_kbase_sync_file.c
new file mode 100644
index 000000000000..45f9489a9e80
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_sync_file.c
@@ -0,0 +1,339 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Code for supporting explicit Linux fences (CONFIG_SYNC_FILE)
+ * Introduced in kernel 4.9.
+ * Android explicit fences (CONFIG_SYNC) can be used for older kernels
+ * (see mali_kbase_sync_android.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/sync_file.h>
+#include <linux/slab.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase_sync.h"
+#include "mali_kbase_fence.h"
+#include "mali_kbase.h"
+
+static const struct file_operations stream_fops = {
+ .owner = THIS_MODULE
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+ if (!out_fd)
+ return -EINVAL;
+
+ *out_fd = anon_inode_getfd(name, &stream_fops, NULL,
+ O_RDONLY | O_CLOEXEC);
+ if (*out_fd < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+ struct sync_file *sync_file;
+ int fd;
+
+ fence = kbase_fence_out_new(katom);
+ if (!fence)
+ return -ENOMEM;
+
+ /* Take an extra reference to the fence on behalf of the katom.
+ * This is needed because sync_file_create() will take ownership of
+ * one of these refs */
+ dma_fence_get(fence);
+
+ /* create a sync_file fd representing the fence */
+ sync_file = sync_file_create(fence);
+ if (!sync_file) {
+ dma_fence_put(fence);
+ kbase_fence_out_remove(katom);
+ return -ENOMEM;
+ }
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ fput(sync_file->file);
+ kbase_fence_out_remove(katom);
+ return fd;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence = sync_file_get_fence(fd);
+#else
+ struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+ if (!fence)
+ return -ENOENT;
+
+ kbase_fence_fence_in_set(katom, fence);
+
+ return 0;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence = sync_file_get_fence(fd);
+#else
+ struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+ if (!fence)
+ return -EINVAL;
+
+ dma_fence_put(fence);
+
+ return 0; /* valid */
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+ int res;
+
+ if (!kbase_fence_out_is_ours(katom)) {
+ /* Not our fence */
+ return BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+ res = kbase_fence_out_signal(katom, result);
+ if (unlikely(res < 0)) {
+ dev_warn(katom->kctx->kbdev->dev,
+ "fence_signal() failed with %d\n", res);
+ }
+
+ kbase_sync_fence_out_remove(katom);
+
+ return (result != 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static void kbase_fence_wait_callback(struct fence *fence,
+ struct fence_cb *cb)
+#else
+static void kbase_fence_wait_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+#endif
+{
+ struct kbase_fence_cb *kcb = container_of(cb,
+ struct kbase_fence_cb,
+ fence_cb);
+ struct kbase_jd_atom *katom = kcb->katom;
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Cancel atom if fence is erroneous */
+ if (dma_fence_is_signaled(kcb->fence) && kcb->fence->error < 0)
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ if (kbase_fence_dep_count_dec_and_test(katom)) {
+ /* We take responsibility for handling this */
+ kbase_fence_dep_count_set(katom, -1);
+
+ /* To prevent a potential deadlock we schedule the work onto the
+ * job_done_wq workqueue
+ *
+ * The issue is that we may signal the timeline while holding
+ * kctx->jctx.lock and the callbacks are run synchronously from
+ * sync_timeline_signal. So we simply defer the work.
+ */
+ INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+ }
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+ int err;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+
+ fence = kbase_fence_in_get(katom);
+ if (!fence)
+ return 0; /* no input fence to wait for, good to go! */
+
+ kbase_fence_dep_count_set(katom, 1);
+
+ err = kbase_fence_add_callback(katom, fence, kbase_fence_wait_callback);
+
+ kbase_fence_put(fence);
+
+ if (likely(!err)) {
+ /* Test if the callbacks are already triggered */
+ if (kbase_fence_dep_count_dec_and_test(katom)) {
+ kbase_fence_free_callbacks(katom);
+ kbase_fence_dep_count_set(katom, -1);
+ return 0; /* Already signaled, good to go right now */
+ }
+
+ /* Callback installed, so we just need to wait for it... */
+ } else {
+ /* Failure */
+ kbase_fence_free_callbacks(katom);
+ kbase_fence_dep_count_set(katom, -1);
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ /* We should cause the dependent jobs in the bag to be failed.
+ * To do this we schedule the work queue to complete this job */
+
+ INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+ queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+ }
+
+ return 1; /* completion to be done later by callback/worker */
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+ if (!kbase_fence_free_callbacks(katom)) {
+ /* The wait wasn't cancelled -
+ * leave the cleanup for kbase_fence_wait_callback */
+ return;
+ }
+
+ /* Take responsibility of completion */
+ kbase_fence_dep_count_set(katom, -1);
+
+ /* Wait was cancelled - zap the atoms */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ kbasep_remove_waiting_soft_job(katom);
+ kbase_finish_soft_job(katom);
+
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+ kbase_fence_out_remove(katom);
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+ kbase_fence_free_callbacks(katom);
+ kbase_fence_in_remove(katom);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static void kbase_sync_fence_info_get(struct fence *fence,
+ struct kbase_sync_fence_info *info)
+#else
+static void kbase_sync_fence_info_get(struct dma_fence *fence,
+ struct kbase_sync_fence_info *info)
+#endif
+{
+ info->fence = fence;
+
+ /* translate into CONFIG_SYNC status:
+ * < 0 : error
+ * 0 : active
+ * 1 : signaled
+ */
+ if (dma_fence_is_signaled(fence)) {
+ if (fence->error < 0)
+ info->status = fence->error; /* signaled with error */
+ else
+ info->status = 1; /* signaled with success */
+ } else {
+ info->status = 0; /* still active (unsignaled) */
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+ scnprintf(info->name, sizeof(info->name), "%u#%u",
+ fence->context, fence->seqno);
+#else
+ scnprintf(info->name, sizeof(info->name), "%llu#%u",
+ fence->context, fence->seqno);
+#endif
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+
+ fence = kbase_fence_in_get(katom);
+ if (!fence)
+ return -ENOENT;
+
+ kbase_sync_fence_info_get(fence, info);
+
+ kbase_fence_put(fence);
+
+ return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+ struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+ struct fence *fence;
+#else
+ struct dma_fence *fence;
+#endif
+
+ fence = kbase_fence_out_get(katom);
+ if (!fence)
+ return -ENOENT;
+
+ kbase_sync_fence_info_get(fence, info);
+
+ kbase_fence_put(fence);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+ /* Not implemented */
+}
+#endif
diff --git a/drivers/gpu/arm_gpu/mali_kbase_tlstream.c b/drivers/gpu/arm_gpu/mali_kbase_tlstream.c
new file mode 100644
index 000000000000..c952993f3448
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_tlstream.c
@@ -0,0 +1,2572 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_tlstream.h>
+
+/*****************************************************************************/
+
+/* The version of swtrace protocol used in timeline stream. */
+#define SWTRACE_VERSION 3
+
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX 64 /* bytes */
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE 4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#define PACKET_COUNT 16
+
+/* The number of bytes reserved for packet header.
+ * This value must be defined according to the MIPE documentation. */
+#define PACKET_HEADER_SIZE 8 /* bytes */
+
+/* The number of bytes reserved for packet sequence number.
+ * This value must be defined according to the MIPE documentation. */
+#define PACKET_NUMBER_SIZE 4 /* bytes */
+
+/* Packet header - first word.
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_STREAMID_POS 0
+#define PACKET_STREAMID_LEN 8
+#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
+#define PACKET_RSVD1_LEN 8
+#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
+#define PACKET_TYPE_LEN 3
+#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN)
+#define PACKET_CLASS_LEN 7
+#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN)
+#define PACKET_FAMILY_LEN 6
+
+/* Packet header - second word
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_LENGTH_POS 0
+#define PACKET_LENGTH_LEN 24
+#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
+#define PACKET_SEQBIT_LEN 1
+#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
+#define PACKET_RSVD2_LEN 7
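+
+/* Illustrative layout (a sketch derived from the macros above, not a quote
+ * from the MIPE documentation): with these positions and lengths, the first
+ * header word packs, from bit 0 upwards, the stream id (8 bits), a reserved
+ * field (8 bits), the packet type (3 bits), the packet class (7 bits) and the
+ * packet family (6 bits); the second word packs the data length (24 bits),
+ * the sequence-number-present bit (1 bit) and a reserved field (7 bits). */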
+
+/* Types of streams generated by timeline.
+ * Order is significant! Header streams must precede respective body streams. */
+enum tl_stream_type {
+ TL_STREAM_TYPE_OBJ_HEADER,
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ TL_STREAM_TYPE_OBJ,
+ TL_STREAM_TYPE_AUX_HEADER,
+ TL_STREAM_TYPE_AUX,
+
+ TL_STREAM_TYPE_COUNT
+};
+
+/* Timeline packet family ids.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_family {
+ TL_PACKET_FAMILY_CTRL = 0, /* control packets */
+ TL_PACKET_FAMILY_TL = 1, /* timeline packets */
+
+ TL_PACKET_FAMILY_COUNT
+};
+
+/* Packet classes used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_class {
+ TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
+ TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
+};
+
+/* Packet types used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_type {
+ TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */
+ TL_PACKET_TYPE_BODY = 1, /* stream's body */
+ TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
+};
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+ /* Timeline object events. */
+ KBASE_TL_NEW_CTX,
+ KBASE_TL_NEW_GPU,
+ KBASE_TL_NEW_LPU,
+ KBASE_TL_NEW_ATOM,
+ KBASE_TL_NEW_AS,
+ KBASE_TL_DEL_CTX,
+ KBASE_TL_DEL_ATOM,
+ KBASE_TL_LIFELINK_LPU_GPU,
+ KBASE_TL_LIFELINK_AS_GPU,
+ KBASE_TL_RET_CTX_LPU,
+ KBASE_TL_RET_ATOM_CTX,
+ KBASE_TL_RET_ATOM_LPU,
+ KBASE_TL_NRET_CTX_LPU,
+ KBASE_TL_NRET_ATOM_CTX,
+ KBASE_TL_NRET_ATOM_LPU,
+ KBASE_TL_RET_AS_CTX,
+ KBASE_TL_NRET_AS_CTX,
+ KBASE_TL_RET_ATOM_AS,
+ KBASE_TL_NRET_ATOM_AS,
+ KBASE_TL_DEP_ATOM_ATOM,
+ KBASE_TL_NDEP_ATOM_ATOM,
+ KBASE_TL_RDEP_ATOM_ATOM,
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE,
+ KBASE_TL_ATTRIB_ATOM_JIT,
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+
+ /* Job dump specific events. */
+ KBASE_JD_GPU_SOFT_RESET
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+ KBASE_AUX_PM_STATE,
+ KBASE_AUX_PAGEFAULT,
+ KBASE_AUX_PAGESALLOC,
+ KBASE_AUX_DEVFREQ_TARGET,
+ KBASE_AUX_PROTECTED_ENTER_START,
+ KBASE_AUX_PROTECTED_ENTER_END,
+ KBASE_AUX_PROTECTED_LEAVE_START,
+ KBASE_AUX_PROTECTED_LEAVE_END
+};
+
+/*****************************************************************************/
+
+/**
+ * struct tl_stream - timeline stream structure
+ * @lock: message order lock
+ * @buffer: array of buffers
+ * @wbi: write buffer index
+ * @rbi: read buffer index
+ * @numbered: if non-zero stream's packets are sequentially numbered
+ * @autoflush_counter: counter tracking stream's autoflush state
+ *
+ * This structure holds the information needed to construct proper packets in
+ * the timeline stream. Each message in the sequence must bear a timestamp
+ * that is greater than that of the previous message in the same stream. For
+ * this reason the lock is held throughout message creation. Each stream
+ * contains a set of buffers, each holding one MIPE packet. If there is not
+ * enough free space to store an incoming message, the oldest buffer is
+ * discarded.
+ * Each packet in a timeline body stream has a sequence number embedded (this
+ * value must increment monotonically and is used by the packet receiver to
+ * detect buffer overflows).
+ * The autoflush counter is set to a negative number when there is no data
+ * pending for flush and to zero on every update of the buffer. The autoflush
+ * timer increments the counter by one on every expiry. If there is no
+ * activity on the buffer during two consecutive timer expiries, the stream
+ * buffer is flushed.
+ */
+struct tl_stream {
+ spinlock_t lock;
+
+ struct {
+ atomic_t size; /* number of bytes in buffer */
+ char data[PACKET_SIZE]; /* buffer's data */
+ } buffer[PACKET_COUNT];
+
+ atomic_t wbi;
+ atomic_t rbi;
+
+ int numbered;
+ atomic_t autoflush_counter;
+};
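+
+/* A rough sketch of how the ring of buffers above is used (based on the code
+ * in this file, not on external documentation): wbi and rbi are free-running
+ * counters; buffer[wbi % PACKET_COUNT] is the packet currently being written,
+ * buffer[rbi % PACKET_COUNT] is the next packet to hand to the reader, and
+ * when the writer gets PACKET_COUNT packets ahead of the reader it advances
+ * rbi itself, dropping the oldest unread packet. */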
+
+/**
+ * struct tp_desc - tracepoint message descriptor structure
+ * @id: tracepoint ID identifying message in stream
+ * @id_str: human readable version of tracepoint ID
+ * @name: tracepoint description
+ * @arg_types: tracepoint's arguments types declaration
+ * @arg_names: comma separated list of tracepoint's arguments names
+ */
+struct tp_desc {
+ u32 id;
+ const char *id_str;
+ const char *name;
+ const char *arg_types;
+ const char *arg_names;
+};
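+
+/* The argument type strings used in the descriptor tables below appear to
+ * follow a simple convention (an observation drawn from the tables and the
+ * tracepoint writers in this file, not from the MIPE documentation): 'p'
+ * marks a pointer argument, 'I' a 32-bit value, 'L' a 64-bit value and 's' a
+ * length-prefixed string. */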
+
+/*****************************************************************************/
+
+/* Configuration of timeline streams generated by the kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have a stream id value of 1 (as opposed to
+ * user space streams, which have a value of 0). */
+static const struct {
+ enum tl_packet_family pkt_family;
+ enum tl_packet_class pkt_class;
+ enum tl_packet_type pkt_type;
+ unsigned int stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1}
+};
+
+/* The timeline streams generated by kernel. */
+static struct tl_stream *tl_stream[TL_STREAM_TYPE_COUNT];
+
+/* Autoflush timer. */
+static struct timer_list autoflush_timer;
+
+/* If non-zero autoflush timer is active. */
+static atomic_t autoflush_timer_active;
+
+/* Reader lock. Only one reader is allowed to have access to the timeline
+ * streams at any given time. */
+static DEFINE_MUTEX(tl_reader_lock);
+
+/* Timeline stream event queue. */
+static DECLARE_WAIT_QUEUE_HEAD(tl_event_queue);
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_tlstream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait);
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations kbasep_tlstream_fops = {
+ .release = kbasep_tlstream_release,
+ .read = kbasep_tlstream_read,
+ .poll = kbasep_tlstream_poll,
+};
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+static const struct tp_desc tp_desc_obj[] = {
+ {
+ KBASE_TL_NEW_CTX,
+ __stringify(KBASE_TL_NEW_CTX),
+ "object ctx is created",
+ "@pII",
+ "ctx,ctx_nr,tgid"
+ },
+ {
+ KBASE_TL_NEW_GPU,
+ __stringify(KBASE_TL_NEW_GPU),
+ "object gpu is created",
+ "@pII",
+ "gpu,gpu_id,core_count"
+ },
+ {
+ KBASE_TL_NEW_LPU,
+ __stringify(KBASE_TL_NEW_LPU),
+ "object lpu is created",
+ "@pII",
+ "lpu,lpu_nr,lpu_fn"
+ },
+ {
+ KBASE_TL_NEW_ATOM,
+ __stringify(KBASE_TL_NEW_ATOM),
+ "object atom is created",
+ "@pI",
+ "atom,atom_nr"
+ },
+ {
+ KBASE_TL_NEW_AS,
+ __stringify(KBASE_TL_NEW_AS),
+ "address space object is created",
+ "@pI",
+ "address_space,as_nr"
+ },
+ {
+ KBASE_TL_DEL_CTX,
+ __stringify(KBASE_TL_DEL_CTX),
+ "context is destroyed",
+ "@p",
+ "ctx"
+ },
+ {
+ KBASE_TL_DEL_ATOM,
+ __stringify(KBASE_TL_DEL_ATOM),
+ "atom is destroyed",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_LIFELINK_LPU_GPU,
+ __stringify(KBASE_TL_LIFELINK_LPU_GPU),
+ "lpu is deleted with gpu",
+ "@pp",
+ "lpu,gpu"
+ },
+ {
+ KBASE_TL_LIFELINK_AS_GPU,
+ __stringify(KBASE_TL_LIFELINK_AS_GPU),
+ "address space is deleted with gpu",
+ "@pp",
+ "address_space,gpu"
+ },
+ {
+ KBASE_TL_RET_CTX_LPU,
+ __stringify(KBASE_TL_RET_CTX_LPU),
+ "context is retained by lpu",
+ "@pp",
+ "ctx,lpu"
+ },
+ {
+ KBASE_TL_RET_ATOM_CTX,
+ __stringify(KBASE_TL_RET_ATOM_CTX),
+ "atom is retained by context",
+ "@pp",
+ "atom,ctx"
+ },
+ {
+ KBASE_TL_RET_ATOM_LPU,
+ __stringify(KBASE_TL_RET_ATOM_LPU),
+ "atom is retained by lpu",
+ "@pps",
+ "atom,lpu,attrib_match_list"
+ },
+ {
+ KBASE_TL_NRET_CTX_LPU,
+ __stringify(KBASE_TL_NRET_CTX_LPU),
+ "context is released by lpu",
+ "@pp",
+ "ctx,lpu"
+ },
+ {
+ KBASE_TL_NRET_ATOM_CTX,
+ __stringify(KBASE_TL_NRET_ATOM_CTX),
+ "atom is released by context",
+ "@pp",
+ "atom,ctx"
+ },
+ {
+ KBASE_TL_NRET_ATOM_LPU,
+ __stringify(KBASE_TL_NRET_ATOM_LPU),
+ "atom is released by lpu",
+ "@pp",
+ "atom,lpu"
+ },
+ {
+ KBASE_TL_RET_AS_CTX,
+ __stringify(KBASE_TL_RET_AS_CTX),
+ "address space is retained by context",
+ "@pp",
+ "address_space,ctx"
+ },
+ {
+ KBASE_TL_NRET_AS_CTX,
+ __stringify(KBASE_TL_NRET_AS_CTX),
+ "address space is released by context",
+ "@pp",
+ "address_space,ctx"
+ },
+ {
+ KBASE_TL_RET_ATOM_AS,
+ __stringify(KBASE_TL_RET_ATOM_AS),
+ "atom is retained by address space",
+ "@pp",
+ "atom,address_space"
+ },
+ {
+ KBASE_TL_NRET_ATOM_AS,
+ __stringify(KBASE_TL_NRET_ATOM_AS),
+ "atom is released by address space",
+ "@pp",
+ "atom,address_space"
+ },
+ {
+ KBASE_TL_DEP_ATOM_ATOM,
+ __stringify(KBASE_TL_DEP_ATOM_ATOM),
+ "atom2 depends on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_NDEP_ATOM_ATOM,
+ __stringify(KBASE_TL_NDEP_ATOM_ATOM),
+ "atom2 no longer depends on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_RDEP_ATOM_ATOM,
+ __stringify(KBASE_TL_RDEP_ATOM_ATOM),
+ "resolved dependecy of atom2 depending on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ __stringify(KBASE_TL_ATTRIB_ATOM_CONFIG),
+ "atom job slot attributes",
+ "@pLLI",
+ "atom,descriptor,affinity,config"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY),
+ "atom priority",
+ "@pI",
+ "atom,prio"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ __stringify(KBASE_TL_ATTRIB_ATOM_STATE),
+ "atom state",
+ "@pI",
+ "atom,state"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE,
+ __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE),
+ "atom caused priority change",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_JIT,
+ __stringify(KBASE_TL_ATTRIB_ATOM_JIT),
+ "jit done for atom",
+ "@pLL",
+ "atom,edit_addr,new_addr"
+ },
+ {
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ __stringify(KBASE_TL_ATTRIB_AS_CONFIG),
+ "address space attributes",
+ "@pLLL",
+ "address_space,transtab,memattr,transcfg"
+ },
+ {
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ __stringify(KBASE_TL_EVENT_LPU_SOFTSTOP),
+ "softstop event on given lpu",
+ "@p",
+ "lpu"
+ },
+ {
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX),
+ "atom softstopped",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE),
+ "atom softstop issued",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_JD_GPU_SOFT_RESET,
+ __stringify(KBASE_JD_GPU_SOFT_RESET),
+ "gpu soft reset",
+ "@p",
+ "gpu"
+ },
+};
+
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+static const struct tp_desc tp_desc_aux[] = {
+ {
+ KBASE_AUX_PM_STATE,
+ __stringify(KBASE_AUX_PM_STATE),
+ "PM state",
+ "@IL",
+ "core_type,core_state_bitset"
+ },
+ {
+ KBASE_AUX_PAGEFAULT,
+ __stringify(KBASE_AUX_PAGEFAULT),
+ "Page fault",
+ "@IL",
+ "ctx_nr,page_cnt_change"
+ },
+ {
+ KBASE_AUX_PAGESALLOC,
+ __stringify(KBASE_AUX_PAGESALLOC),
+ "Total alloc pages change",
+ "@IL",
+ "ctx_nr,page_cnt"
+ },
+ {
+ KBASE_AUX_DEVFREQ_TARGET,
+ __stringify(KBASE_AUX_DEVFREQ_TARGET),
+ "New device frequency target",
+ "@L",
+ "target_freq"
+ },
+ {
+ KBASE_AUX_PROTECTED_ENTER_START,
+ __stringify(KBASE_AUX_PROTECTED_ENTER_START),
+ "enter protected mode start",
+ "@p",
+ "gpu"
+ },
+ {
+ KBASE_AUX_PROTECTED_ENTER_END,
+ __stringify(KBASE_AUX_PROTECTED_ENTER_END),
+ "enter protected mode end",
+ "@p",
+ "gpu"
+ },
+ {
+ KBASE_AUX_PROTECTED_LEAVE_START,
+ __stringify(KBASE_AUX_PROTECTED_LEAVE_START),
+ "leave protected mode start",
+ "@p",
+ "gpu"
+ },
+ {
+ KBASE_AUX_PROTECTED_LEAVE_END,
+ __stringify(KBASE_AUX_PROTECTED_LEAVE_END),
+ "leave protected mode end",
+ "@p",
+ "gpu"
+ }
+};
+
+#if MALI_UNIT_TEST
+/* Number of bytes read by user. */
+static atomic_t tlstream_bytes_collected = {0};
+
+/* Number of bytes generated by tracepoint messages. */
+static atomic_t tlstream_bytes_generated = {0};
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+/* Indicator of whether the timeline stream file descriptor is used. */
+atomic_t kbase_tlstream_enabled = {0};
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_get_timestamp - return timestamp
+ *
+ * Function returns a timestamp value based on the raw monotonic timer. The
+ * value will wrap around zero in case of overflow.
+ * Return: timestamp value
+ */
+static u64 kbasep_tlstream_get_timestamp(void)
+{
+ struct timespec ts;
+ u64 timestamp;
+
+ getrawmonotonic(&ts);
+ timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+ return timestamp;
+}
+
+/**
+ * kbasep_tlstream_write_bytes - write data to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @bytes: pointer to buffer holding data
+ * @len: length of data to be written
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_bytes(
+ char *buffer,
+ size_t pos,
+ const void *bytes,
+ size_t len)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(bytes);
+
+ memcpy(&buffer[pos], bytes, len);
+
+ return pos + len;
+}
+
+/**
+ * kbasep_tlstream_write_string - write string to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @string: pointer to buffer holding the source string
+ * @max_write_size: number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_string(
+ char *buffer,
+ size_t pos,
+ const char *string,
+ size_t max_write_size)
+{
+ u32 string_len;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(string);
+ /* A timeline string consists of at least the string length and a NUL
+ * terminator. */
+ KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+ max_write_size -= sizeof(string_len);
+
+ string_len = strlcpy(
+ &buffer[pos + sizeof(string_len)],
+ string,
+ max_write_size);
+ string_len += sizeof(char);
+
+ /* Make sure that the source string fits into the buffer. */
+ KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+ /* Update string length. */
+ memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+ return pos + sizeof(string_len) + string_len;
+}
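+
+/* Example of the resulting encoding (a sketch of what the function above
+ * produces): writing "ctx" stores a 32-bit length of 4 followed by the bytes
+ * 'c', 't', 'x', '\0', and returns the position advanced by 8 bytes. */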
+
+/**
+ * kbasep_tlstream_write_timestamp - write timestamp to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos)
+{
+ u64 timestamp = kbasep_tlstream_get_timestamp();
+
+ return kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &timestamp, sizeof(timestamp));
+}
+
+/**
+ * kbasep_tlstream_put_bits - put bits in a word
+ * @word: pointer to the words being modified
+ * @value: value that shall be written to given position
+ * @bitpos: position where value shall be written (in bits)
+ * @bitlen: length of value (in bits)
+ */
+static void kbasep_tlstream_put_bits(
+ u32 *word,
+ u32 value,
+ unsigned int bitpos,
+ unsigned int bitlen)
+{
+ const u32 mask = ((1 << bitlen) - 1) << bitpos;
+
+ KBASE_DEBUG_ASSERT(word);
+ KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen));
+ KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32);
+
+ *word &= ~mask;
+ *word |= ((value << bitpos) & mask);
+}
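+
+/* Usage sketch (illustrative only): kbasep_tlstream_put_bits(&w, 2, 16, 3)
+ * clears bits 16..18 of w and then sets them to the value 2, which is how the
+ * header setup below places e.g. the packet type field into the first word. */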
+
+/**
+ * kbasep_tlstream_packet_header_setup - setup the packet header
+ * @buffer: pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type: packet's type
+ * @pkt_class: packet's class
+ * @stream_id: stream id
+ * @numbered: non-zero if this stream is numbered
+ *
+ * Function sets up the immutable part of the packet header in the given buffer.
+ */
+static void kbasep_tlstream_packet_header_setup(
+ char *buffer,
+ enum tl_packet_family pkt_family,
+ enum tl_packet_class pkt_class,
+ enum tl_packet_type pkt_type,
+ unsigned int stream_id,
+ int numbered)
+{
+ u32 word0 = 0;
+ u32 word1 = 0;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL);
+ KBASE_DEBUG_ASSERT(
+ (pkt_type == TL_PACKET_TYPE_HEADER) ||
+ (pkt_type == TL_PACKET_TYPE_SUMMARY) ||
+ (pkt_type == TL_PACKET_TYPE_BODY));
+ KBASE_DEBUG_ASSERT(
+ (pkt_class == TL_PACKET_CLASS_OBJ) ||
+ (pkt_class == TL_PACKET_CLASS_AUX));
+
+ kbasep_tlstream_put_bits(
+ &word0, pkt_family,
+ PACKET_FAMILY_POS, PACKET_FAMILY_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, pkt_class,
+ PACKET_CLASS_POS, PACKET_CLASS_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, pkt_type,
+ PACKET_TYPE_POS, PACKET_TYPE_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, stream_id,
+ PACKET_STREAMID_POS, PACKET_STREAMID_LEN);
+
+ if (numbered)
+ kbasep_tlstream_put_bits(
+ &word1, 1,
+ PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN);
+
+ memcpy(&buffer[0], &word0, sizeof(word0));
+ memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_header_update - update the packet header
+ * @buffer: pointer to the buffer
+ * @data_size: amount of data carried in this packet
+ *
+ * Function updates the mutable part of the packet header in the given buffer.
+ * Note that the value of data_size must not include the size of the header.
+ */
+static void kbasep_tlstream_packet_header_update(
+ char *buffer,
+ size_t data_size)
+{
+ u32 word0;
+ u32 word1;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ CSTD_UNUSED(word0);
+
+ memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1));
+
+ kbasep_tlstream_put_bits(
+ &word1, data_size,
+ PACKET_LENGTH_POS, PACKET_LENGTH_LEN);
+
+ memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_number_update - update the packet number
+ * @buffer: pointer to the buffer
+ * @counter: value of packet counter for this packet's stream
+ *
+ * Function updates packet number embedded within the packet placed in the
+ * given buffer.
+ */
+static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+
+ memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
+}
+
+/**
+ * kbasep_timeline_stream_reset - reset stream
+ * @stream: pointer to the stream structure
+ *
+ * Function discards all pending messages and resets packet counters.
+ */
+static void kbasep_timeline_stream_reset(struct tl_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < PACKET_COUNT; i++) {
+ if (stream->numbered)
+ atomic_set(
+ &stream->buffer[i].size,
+ PACKET_HEADER_SIZE +
+ PACKET_NUMBER_SIZE);
+ else
+ atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
+ }
+
+ atomic_set(&stream->wbi, 0);
+ atomic_set(&stream->rbi, 0);
+}
+
+/**
+ * kbasep_timeline_stream_init - initialize timeline stream
+ * @stream: pointer to the stream structure
+ * @stream_type: stream type
+ */
+static void kbasep_timeline_stream_init(
+ struct tl_stream *stream,
+ enum tl_stream_type stream_type)
+{
+ unsigned int i;
+
+ KBASE_DEBUG_ASSERT(stream);
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+ spin_lock_init(&stream->lock);
+
+ /* All packets carrying tracepoints shall be numbered. */
+ if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type)
+ stream->numbered = 1;
+ else
+ stream->numbered = 0;
+
+ for (i = 0; i < PACKET_COUNT; i++)
+ kbasep_tlstream_packet_header_setup(
+ stream->buffer[i].data,
+ tl_stream_cfg[stream_type].pkt_family,
+ tl_stream_cfg[stream_type].pkt_class,
+ tl_stream_cfg[stream_type].pkt_type,
+ tl_stream_cfg[stream_type].stream_id,
+ stream->numbered);
+
+ kbasep_timeline_stream_reset(tl_stream[stream_type]);
+}
+
+/**
+ * kbasep_timeline_stream_term - terminate timeline stream
+ * @stream: pointer to the stream structure
+ */
+static void kbasep_timeline_stream_term(struct tl_stream *stream)
+{
+ KBASE_DEBUG_ASSERT(stream);
+}
+
+/**
+ * kbasep_tlstream_msgbuf_submit - submit packet to the user space
+ * @stream: pointer to the stream structure
+ * @wb_idx_raw: write buffer index
+ * @wb_size: length of data stored in current buffer
+ *
+ * Function updates the currently written buffer with the packet header.
+ * Then the write index is incremented and the buffer is handed over to user
+ * space. Parameters of the new buffer are returned via the provided
+ * arguments.
+ *
+ * Return: length of data in the new buffer
+ *
+ * Warning: The caller must update the stream structure with the returned
+ * value.
+ */
+static size_t kbasep_tlstream_msgbuf_submit(
+ struct tl_stream *stream,
+ unsigned int wb_idx_raw,
+ unsigned int wb_size)
+{
+ unsigned int rb_idx_raw = atomic_read(&stream->rbi);
+ unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
+
+ /* Set stream as flushed. */
+ atomic_set(&stream->autoflush_counter, -1);
+
+ kbasep_tlstream_packet_header_update(
+ stream->buffer[wb_idx].data,
+ wb_size - PACKET_HEADER_SIZE);
+
+ if (stream->numbered)
+ kbasep_tlstream_packet_number_update(
+ stream->buffer[wb_idx].data,
+ wb_idx_raw);
+
+ /* Increasing the write buffer index will expose this packet to the
+ * reader. As stream->lock is not taken on the reader side we must make
+ * sure memory is updated correctly before this happens. */
+ smp_wmb();
+ wb_idx_raw++;
+ atomic_set(&stream->wbi, wb_idx_raw);
+
+ /* Inform user that packets are ready for reading. */
+ wake_up_interruptible(&tl_event_queue);
+
+ /* Detect and mark overflow in this stream. */
+ if (PACKET_COUNT == wb_idx_raw - rb_idx_raw) {
+ /* Reader side depends on this increment to correctly handle
+ * overflows. The value shall be updated only if it was not
+ * modified by the reader. The data holding buffer will not be
+ * updated before stream->lock is released, however size of the
+ * buffer will. Make sure this increment is globally visible
+ * before information about selected write buffer size. */
+ atomic_cmpxchg(&stream->rbi, rb_idx_raw, rb_idx_raw + 1);
+ }
+
+ wb_size = PACKET_HEADER_SIZE;
+ if (stream->numbered)
+ wb_size += PACKET_NUMBER_SIZE;
+
+ return wb_size;
+}
+
+/**
+ * kbasep_tlstream_msgbuf_acquire - lock selected stream and reserve buffer
+ * @stream_type: type of the stream that shall be locked
+ * @msg_size: message size
+ * @flags: pointer to store flags passed back on stream release
+ *
+ * Function will lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
+ *
+ * Return: pointer to the buffer where message can be stored
+ *
+ * Warning: Stream must be released with kbasep_tlstream_msgbuf_release().
+ * Only atomic operations are allowed while stream is locked
+ * (i.e. do not use any operation that may sleep).
+ */
+static char *kbasep_tlstream_msgbuf_acquire(
+ enum tl_stream_type stream_type,
+ size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock)
+{
+ struct tl_stream *stream;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+ KBASE_DEBUG_ASSERT(
+ PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+ msg_size);
+
+ stream = tl_stream[stream_type];
+
+ spin_lock_irqsave(&stream->lock, *flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ /* Select next buffer if data will not fit into current one. */
+ if (PACKET_SIZE < wb_size + msg_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ }
+
+ /* Reserve space in selected buffer. */
+ atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
+
+#if MALI_UNIT_TEST
+ atomic_add(msg_size, &tlstream_bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+ return &stream->buffer[wb_idx].data[wb_size];
+}
+
+/**
+ * kbasep_tlstream_msgbuf_release - unlock selected stream
+ * @stream_type: type of the stream that shall be released
+ * @flags: value obtained during stream acquire
+ *
+ * Function releases stream that has been previously locked with a call to
+ * kbasep_tlstream_msgbuf_acquire().
+ */
+static void kbasep_tlstream_msgbuf_release(
+ enum tl_stream_type stream_type,
+ unsigned long flags) __releases(&stream->lock)
+{
+ struct tl_stream *stream;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+ stream = tl_stream[stream_type];
+
+ /* Mark stream as containing unflushed data. */
+ atomic_set(&stream->autoflush_counter, 0);
+
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_flush_stream - flush stream
+ * @stype: type of stream to be flushed
+ *
+ * Flush pending data in timeline stream.
+ */
+static void kbasep_tlstream_flush_stream(enum tl_stream_type stype)
+{
+ struct tl_stream *stream = tl_stream[stype];
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
+
+ if (stream->numbered)
+ min_size += PACKET_NUMBER_SIZE;
+
+ spin_lock_irqsave(&stream->lock, flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ if (wb_size > min_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ atomic_set(&stream->buffer[wb_idx].size, wb_size);
+ }
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+/**
+ * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback
+ * @data: unused
+ *
+ * Timer is executed periodically to check if any of the streams contains a
+ * buffer ready to be submitted to user space.
+ */
+static void kbasep_tlstream_autoflush_timer_callback(unsigned long data)
+{
+ enum tl_stream_type stype;
+ int rcode;
+
+ CSTD_UNUSED(data);
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
+ struct tl_stream *stream = tl_stream[stype];
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
+
+ int af_cnt = atomic_read(&stream->autoflush_counter);
+
+ /* Check if the stream contains unflushed data. */
+ if (0 > af_cnt)
+ continue;
+
+ /* Check if stream should be flushed now. */
+ if (af_cnt != atomic_cmpxchg(
+ &stream->autoflush_counter,
+ af_cnt,
+ af_cnt + 1))
+ continue;
+ if (!af_cnt)
+ continue;
+
+ /* Autoflush this stream. */
+ if (stream->numbered)
+ min_size += PACKET_NUMBER_SIZE;
+
+ spin_lock_irqsave(&stream->lock, flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ if (wb_size > min_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ atomic_set(&stream->buffer[wb_idx].size,
+ wb_size);
+ }
+ spin_unlock_irqrestore(&stream->lock, flags);
+ }
+
+ if (atomic_read(&autoflush_timer_active))
+ rcode = mod_timer(
+ &autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+}
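+
+/* Autoflush state sketch (a restatement of the scheme described for
+ * struct tl_stream, derived from the code above): -1 means "nothing pending",
+ * 0 means "data written since the last expiry", 1 means "one expiry has
+ * passed with no new data"; the callback flushes a stream only when it
+ * observes the counter already at a positive value. */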
+
+/**
+ * kbasep_tlstream_packet_pending - check timeline streams for pending packets
+ * @stype: pointer to variable where stream type will be placed
+ * @rb_idx_raw: pointer to variable where read buffer index will be placed
+ *
+ * Function checks all streams for pending packets. It will stop as soon as a
+ * packet ready to be submitted to user space is detected. The variables behind
+ * the pointers passed as parameters to this function will be updated with
+ * values identifying the right stream and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet ready
+ */
+static int kbasep_tlstream_packet_pending(
+ enum tl_stream_type *stype,
+ unsigned int *rb_idx_raw)
+{
+ int pending = 0;
+
+ KBASE_DEBUG_ASSERT(stype);
+ KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+ for (
+ *stype = 0;
+ (*stype < TL_STREAM_TYPE_COUNT) && !pending;
+ (*stype)++) {
+ if (NULL != tl_stream[*stype]) {
+ *rb_idx_raw = atomic_read(&tl_stream[*stype]->rbi);
+ /* Read buffer index may be updated by writer in case of
+ * overflow. Read and write buffer indexes must be
+ * loaded in correct order. */
+ smp_rmb();
+ if (atomic_read(&tl_stream[*stype]->wbi) != *rb_idx_raw)
+ pending = 1;
+ }
+ }
+ (*stype)--;
+
+ return pending;
+}
+
+/**
+ * kbasep_tlstream_read - copy data from streams to buffer provided by user
+ * @filp: pointer to file structure (unused)
+ * @buffer: pointer to the buffer provided by user
+ * @size: maximum amount of data that can be stored in the buffer
+ * @f_pos: pointer to file offset (unused)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_tlstream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ ssize_t copy_len = 0;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(f_pos);
+
+ if (!buffer)
+ return -EINVAL;
+
+ if ((0 > *f_pos) || (PACKET_SIZE > size))
+ return -EINVAL;
+
+ mutex_lock(&tl_reader_lock);
+
+ while (copy_len < size) {
+ enum tl_stream_type stype;
+ unsigned int rb_idx_raw = 0;
+ unsigned int rb_idx;
+ size_t rb_size;
+
+ /* If we don't have any data yet, wait for a packet to be
+ * submitted. If we have already read some packets and there is
+ * no packet pending, return to the user. */
+ if (0 < copy_len) {
+ if (!kbasep_tlstream_packet_pending(
+ &stype,
+ &rb_idx_raw))
+ break;
+ } else {
+ if (wait_event_interruptible(
+ tl_event_queue,
+ kbasep_tlstream_packet_pending(
+ &stype,
+ &rb_idx_raw))) {
+ copy_len = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ /* Check if this packet fits into the user buffer.
+ * If so, copy its content. */
+ rb_idx = rb_idx_raw % PACKET_COUNT;
+ rb_size = atomic_read(&tl_stream[stype]->buffer[rb_idx].size);
+ if (rb_size > size - copy_len)
+ break;
+ if (copy_to_user(
+ &buffer[copy_len],
+ tl_stream[stype]->buffer[rb_idx].data,
+ rb_size)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* If the rbi still points to the packet we just processed
+ * then there was no overflow so we add the copied size to
+ * copy_len and move rbi on to the next packet
+ */
+ smp_rmb();
+ if (atomic_read(&tl_stream[stype]->rbi) == rb_idx_raw) {
+ copy_len += rb_size;
+ atomic_inc(&tl_stream[stype]->rbi);
+
+#if MALI_UNIT_TEST
+ atomic_add(rb_size, &tlstream_bytes_collected);
+#endif /* MALI_UNIT_TEST */
+ }
+ }
+
+ mutex_unlock(&tl_reader_lock);
+
+ return copy_len;
+}
+
+/**
+ * kbasep_tlstream_poll - poll timeline stream for packets
+ * @filp: pointer to file structure
+ * @wait: pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait)
+{
+ enum tl_stream_type stream_type;
+ unsigned int rb_idx;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ poll_wait(filp, &tl_event_queue, wait);
+ if (kbasep_tlstream_packet_pending(&stream_type, &rb_idx))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_tlstream_release - release timeline stream descriptor
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp)
+{
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+ CSTD_UNUSED(inode);
+ CSTD_UNUSED(filp);
+
+ /* Stop autoflush timer before releasing access to streams. */
+ atomic_set(&autoflush_timer_active, 0);
+ del_timer_sync(&autoflush_timer);
+
+ atomic_set(&kbase_tlstream_enabled, 0);
+ return 0;
+}
+
+/**
+ * kbasep_tlstream_timeline_header - prepare timeline header stream packet
+ * @stream_type: type of the stream that will carry header data
+ * @tp_desc: pointer to array with tracepoint descriptors
+ * @tp_count: number of descriptors in the given array
+ *
+ * Function fills in information about tracepoints stored in the body stream
+ * associated with this header stream.
+ */
+static void kbasep_tlstream_timeline_header(
+ enum tl_stream_type stream_type,
+ const struct tp_desc *tp_desc,
+ u32 tp_count)
+{
+ const u8 tv = SWTRACE_VERSION; /* protocol version */
+ const u8 ps = sizeof(void *); /* pointer size */
+ size_t msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count);
+ char *buffer;
+ size_t pos = 0;
+ unsigned long flags;
+ unsigned int i;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+ KBASE_DEBUG_ASSERT(tp_desc);
+
+ /* Calculate the size of the timeline message. */
+ for (i = 0; i < tp_count; i++) {
+ msg_size += sizeof(tp_desc[i].id);
+ msg_size +=
+ strnlen(tp_desc[i].id_str, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].name, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].arg_types, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].arg_names, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ }
+
+ KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size);
+
+ buffer = kbasep_tlstream_msgbuf_acquire(stream_type, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv));
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tp_count, sizeof(tp_count));
+
+ for (i = 0; i < tp_count; i++) {
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &tp_desc[i].id, sizeof(tp_desc[i].id));
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].id_str, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].name, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].arg_types, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].arg_names, msg_size - pos);
+ }
+
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(stream_type, flags);
+
+ /* We don't expect any more data to be read in this stream.
+ * As header stream must be read before its associated body stream,
+ * make this packet visible to the user straightaway. */
+ kbasep_tlstream_flush_stream(stream_type);
+}
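+
+/* Payload layout of the header packet built above (a summary of the code, not
+ * of external documentation): one byte of protocol version, one byte of
+ * pointer size, a 32-bit tracepoint count, then for each tracepoint its
+ * 32-bit id followed by the id string, name, argument types and argument
+ * names, each encoded as a length-prefixed string. */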
+
+/*****************************************************************************/
+
+int kbase_tlstream_init(void)
+{
+ enum tl_stream_type i;
+
+ /* Prepare stream structures. */
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+ tl_stream[i] = kmalloc(sizeof(**tl_stream), GFP_KERNEL);
+ if (!tl_stream[i])
+ break;
+ kbasep_timeline_stream_init(tl_stream[i], i);
+ }
+ if (TL_STREAM_TYPE_COUNT > i) {
+ for (; i > 0; i--) {
+ kbasep_timeline_stream_term(tl_stream[i - 1]);
+ kfree(tl_stream[i - 1]);
+ }
+ return -ENOMEM;
+ }
+
+ /* Initialize autoflush timer. */
+ atomic_set(&autoflush_timer_active, 0);
+ setup_timer(&autoflush_timer,
+ kbasep_tlstream_autoflush_timer_callback,
+ 0);
+
+ return 0;
+}
+
+void kbase_tlstream_term(void)
+{
+ enum tl_stream_type i;
+
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+ kbasep_timeline_stream_term(tl_stream[i]);
+ kfree(tl_stream[i]);
+ }
+}
+
+static void kbase_create_timeline_objects(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ unsigned int lpu_id;
+ unsigned int as_nr;
+ struct kbasep_kctx_list_element *element;
+
+ /* Create LPU objects. */
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ u32 *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, lpu_id, *lpu);
+ }
+
+ /* Create Address Space objects. */
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(&kbdev->as[as_nr], as_nr);
+
+ /* Create GPU object and make it retain all LPUs and address spaces. */
+ KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(
+ kbdev,
+ kbdev->gpu_props.props.raw_props.gpu_id,
+ kbdev->gpu_props.num_cores);
+
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ void *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, kbdev);
+ }
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(
+ &kbdev->as[as_nr],
+ kbdev);
+
+ /* Create object for each known context. */
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry(element, &kbdev->kctx_list, link) {
+ KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(
+ element->kctx,
+ (u32)(element->kctx->id),
+ (u32)(element->kctx->tgid));
+ }
+ /* Before releasing the lock, reset the body stream buffers.
+ * This prevents context creation messages from being directed to both
+ * the summary and the body stream.
+ */
+ kbase_tlstream_reset_body_streams();
+ mutex_unlock(&kbdev->kctx_list_lock);
+ /* Static objects are placed into the summary packet, which needs to be
+ * transmitted first. Flush all streams to make it available to
+ * user space.
+ */
+ kbase_tlstream_flush_streams();
+}
+
+int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags)
+{
+ int ret;
+ u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+
+ if (0 == atomic_cmpxchg(&kbase_tlstream_enabled, 0, tlstream_enabled)) {
+ int rcode;
+
+ ret = anon_inode_getfd(
+ "[mali_tlstream]",
+ &kbasep_tlstream_fops,
+ kctx,
+ O_RDONLY | O_CLOEXEC);
+ if (ret < 0) {
+ atomic_set(&kbase_tlstream_enabled, 0);
+ return ret;
+ }
+
+ /* Reset and initialize header streams. */
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ_HEADER]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ_SUMMARY]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_AUX_HEADER]);
+ kbasep_tlstream_timeline_header(
+ TL_STREAM_TYPE_OBJ_HEADER,
+ tp_desc_obj,
+ ARRAY_SIZE(tp_desc_obj));
+ kbasep_tlstream_timeline_header(
+ TL_STREAM_TYPE_AUX_HEADER,
+ tp_desc_aux,
+ ARRAY_SIZE(tp_desc_aux));
+
+ /* Start autoflush timer. */
+ atomic_set(&autoflush_timer_active, 1);
+ rcode = mod_timer(
+ &autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+
+ /* If job dumping is enabled, readjust the software event's
+ * timeout as the default value of 3 seconds is often
+ * insufficient. */
+ if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
+ dev_info(kctx->kbdev->dev,
+ "Job dumping is enabled, readjusting the software event's timeout\n");
+ atomic_set(&kctx->kbdev->js_data.soft_job_timeout_ms,
+ 1800000);
+ }
+
+ /* Summary stream was cleared during acquire.
+ * Create static timeline objects that will be
+ * read by client.
+ */
+ kbase_create_timeline_objects(kctx);
+
+ } else {
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void kbase_tlstream_flush_streams(void)
+{
+ enum tl_stream_type stype;
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ kbasep_tlstream_flush_stream(stype);
+}
+
+void kbase_tlstream_reset_body_streams(void)
+{
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated)
+{
+ KBASE_DEBUG_ASSERT(bytes_collected);
+ KBASE_DEBUG_ASSERT(bytes_generated);
+ *bytes_collected = atomic_read(&tlstream_bytes_collected);
+ *bytes_generated = atomic_read(&tlstream_bytes_generated);
+}
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+ sizeof(tgid);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tgid, sizeof(tgid));
+
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count)
+{
+ const u32 msg_id = KBASE_TL_NEW_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) +
+ sizeof(core_count);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &id, sizeof(id));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &core_count, sizeof(core_count));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn)
+{
+ const u32 msg_id = KBASE_TL_NEW_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) +
+ sizeof(fn);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &fn, sizeof(fn));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+ sizeof(tgid);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tgid, sizeof(tgid));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+ sizeof(nr);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(void *context)
+{
+ const u32 msg_id = KBASE_TL_DEL_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_atom(void *atom)
+{
+ const u32 msg_id = KBASE_TL_DEL_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_RET_CTX_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+ void *atom, void *lpu, const char *attrib_match_list)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
+ const size_t msg_s0 = sizeof(u32) + sizeof(char) +
+ strnlen(attrib_match_list, STRLEN_MAX);
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) +
+ sizeof(atom) + sizeof(lpu) + msg_s0;
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_string(
+ buffer, pos, attrib_match_list, msg_s0);
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
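+
+/*
+ * Note on the string payload above: msg_s0 budgets a u32 length field, the
+ * bytes of attrib_match_list (capped at STRLEN_MAX) and one terminating
+ * character, which is what kbasep_tlstream_write_string() is expected to
+ * serialise; the KBASE_DEBUG_ASSERT(msg_size == pos) check confirms that
+ * budget at runtime.
+ */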
+
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_DEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_NDEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_RDEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_AS_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &ctx, sizeof(ctx));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_AS_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &ctx, sizeof(ctx));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+ void *atom, u64 jd, u64 affinity, u32 config)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+ sizeof(jd) + sizeof(affinity) + sizeof(config);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &jd, sizeof(jd));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &affinity, sizeof(affinity));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &config, sizeof(config));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(prio);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &prio, sizeof(prio));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(state);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &state, sizeof(state));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+ void *atom, u64 edit_addr, u64 new_addr)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom)
+ + sizeof(edit_addr) + sizeof(new_addr);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &edit_addr, sizeof(edit_addr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &new_addr, sizeof(new_addr));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_as_config(
+ void *as, u64 transtab, u64 memattr, u64 transcfg)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) +
+ sizeof(transtab) + sizeof(memattr) + sizeof(transcfg);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &transtab, sizeof(transtab));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &memattr, sizeof(memattr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &transcfg, sizeof(transcfg));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu)
+{
+ const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu)
+{
+ const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state)
+{
+ const u32 msg_id = KBASE_AUX_PM_STATE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(core_type) +
+ sizeof(state);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &core_type, sizeof(core_type));
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change)
+{
+ const u32 msg_id = KBASE_AUX_PAGEFAULT;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+ sizeof(page_count_change);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &page_count_change, sizeof(page_count_change));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count)
+{
+ const u32 msg_id = KBASE_AUX_PAGESALLOC;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+ sizeof(page_count);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &page_count, sizeof(page_count));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq)
+{
+ const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(target_freq);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &target_freq, sizeof(target_freq));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_end(void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_end(void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_tlstream.h b/drivers/gpu/arm_gpu/mali_kbase_tlstream.h
new file mode 100644
index 000000000000..c0a1117d5f25
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_tlstream.h
@@ -0,0 +1,623 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if !defined(_KBASE_TLSTREAM_H)
+#define _KBASE_TLSTREAM_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+/**
+ * kbase_tlstream_init - initialize timeline infrastructure in kernel
+ * Return: zero on success, negative number on error
+ */
+int kbase_tlstream_init(void);
+
+/**
+ * kbase_tlstream_term - terminate timeline infrastructure in kernel
+ *
+ * The timeline must have been previously enabled with kbase_tlstream_init().
+ */
+void kbase_tlstream_term(void);
+
+/**
+ * kbase_tlstream_acquire - acquire timeline stream file descriptor
+ * @kctx: kernel common context
+ * @flags: timeline stream flags
+ *
+ * This descriptor is meant to be used by the userspace timeline tooling to
+ * gain access to the kernel timeline stream. The stream is later broadcast by
+ * user space to the timeline client.
+ * Only one entity can own the descriptor at any given time; the descriptor
+ * shall be closed if unused. If the descriptor cannot be obtained (i.e. it is
+ * already in use), a negative value is returned.
+ *
+ * Return: file descriptor on success, negative number on error
+ */
+int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags);
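+
+/*
+ * Illustrative use only (the exact call site is driver internal and not part
+ * of this header): the handler servicing a timeline-acquire request from user
+ * space is expected to do something like
+ *
+ *	fd = kbase_tlstream_acquire(kctx, flags);
+ *	if (fd < 0)
+ *		return fd;	(stream already owned or out of resources)
+ *
+ * and then hand the returned file descriptor back to the caller.
+ */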
+
+/**
+ * kbase_tlstream_flush_streams - flush timeline streams.
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_tlstream_flush_streams(void);
+
+/**
+ * kbase_tlstream_reset_body_streams - reset timeline body streams.
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_tlstream_reset_body_streams(void);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_tlstream_test - start timeline stream data generator
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay in milliseconds between trace points written by one
+ * writer
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg: if non-zero aux messages will be included
+ *
+ * This test starts the requested number of asynchronous writers in both IRQ
+ * and thread context. Each writer generates the required number of test
+ * tracepoints (tracepoints with embedded information about the writer that
+ * should be verified by the user space reader). Tracepoints are emitted in
+ * all timeline body streams. If aux_msg is non-zero each writer also
+ * generates non-testable tracepoints (tracepoints without information about
+ * the writer); these are used to check the correctness of the remaining
+ * timeline message generating functions. Each writer waits the requested
+ * time between generating successive sets of messages. This call blocks
+ * until all writers finish.
+ */
+void kbase_tlstream_test(
+ unsigned int tpw_count,
+ unsigned int msg_delay,
+ unsigned int msg_count,
+ int aux_msg);
+
+/**
+ * kbase_tlstream_stats - read timeline stream statistics
+ * @bytes_collected: will hold number of bytes read by the user
+ * @bytes_generated: will hold number of bytes generated by trace points
+ */
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count);
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn);
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu);
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr);
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu);
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr);
+void __kbase_tlstream_tl_del_ctx(void *context);
+void __kbase_tlstream_tl_del_atom(void *atom);
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_ret_atom_lpu(
+ void *atom, void *lpu, const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_attrib_atom_config(
+ void *atom, u64 jd, u64 affinity, u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state);
+void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom);
+void __kbase_tlstream_tl_attrib_atom_jit(
+ void *atom, u64 edit_addr, u64 new_addr);
+void __kbase_tlstream_tl_attrib_as_config(
+ void *as, u64 transtab, u64 memattr, u64 transcfg);
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom);
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu);
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state);
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change);
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count);
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq);
+void __kbase_tlstream_aux_protected_enter_start(void *gpu);
+void __kbase_tlstream_aux_protected_enter_end(void *gpu);
+void __kbase_tlstream_aux_protected_leave_start(void *gpu);
+void __kbase_tlstream_aux_protected_leave_end(void *gpu);
+
+#define TLSTREAM_ENABLED (1 << 31)
+
+extern atomic_t kbase_tlstream_enabled;
+
+#define __TRACE_IF_ENABLED(trace_name, ...) \
+ do { \
+ int enabled = atomic_read(&kbase_tlstream_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_##trace_name(__VA_ARGS__); \
+ } while (0)
+
+#define __TRACE_IF_ENABLED_LATENCY(trace_name, ...) \
+ do { \
+ int enabled = atomic_read(&kbase_tlstream_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_##trace_name(__VA_ARGS__); \
+ } while (0)
+
+#define __TRACE_IF_ENABLED_JD(trace_name, ...) \
+ do { \
+ int enabled = atomic_read(&kbase_tlstream_enabled); \
+ if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
+ __kbase_tlstream_##trace_name(__VA_ARGS__); \
+ } while (0)
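+
+/*
+ * kbase_tlstream_enabled is a single flag word: bit 31 (TLSTREAM_ENABLED) is
+ * the global on/off switch tested by __TRACE_IF_ENABLED, while the lower bits
+ * carry the BASE_TLSTREAM_* flags (expected to be those passed to
+ * kbase_tlstream_acquire()) that gate the latency and job-dumping tracepoint
+ * groups above.
+ */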
+
+/*****************************************************************************/
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX - create context object in timeline
+ * summary
+ * @context: name of the context object
+ * @nr: context number
+ * @tgid: thread Group Id
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number (its attribute) that can be used
+ * to link the kbase context with the userspace context.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(context, nr, tgid) \
+ __TRACE_IF_ENABLED(tl_summary_new_ctx, context, nr, tgid)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU - create GPU object in timeline summary
+ * @gpu: name of the GPU object
+ * @id: id value of this GPU
+ * @core_count: number of cores this GPU hosts
+ *
+ * Function emits a timeline message informing about GPU creation. GPU is
+ * created with two attributes: id and core count.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(gpu, id, core_count) \
+ __TRACE_IF_ENABLED(tl_summary_new_gpu, gpu, id, core_count)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU - create LPU object in timeline summary
+ * @lpu: name of the Logical Processing Unit object
+ * @nr: sequential number assigned to this LPU
+ * @fn: property describing this LPU's functional abilities
+ *
+ * Function emits a timeline message informing about LPU creation. The LPU is
+ * created with two attributes: a number linking this LPU with the GPU's job
+ * slot and a function value bearing information about this LPU's abilities.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, nr, fn) \
+ __TRACE_IF_ENABLED(tl_summary_new_lpu, lpu, nr, fn)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU - lifelink LPU object to GPU
+ * @lpu: name of the Logical Processing Unit object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that LPU object shall be deleted
+ * along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, gpu) \
+ __TRACE_IF_ENABLED(tl_summary_lifelink_lpu_gpu, lpu, gpu)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_AS - create address space object in timeline summary
+ * @as: name of the address space object
+ * @nr: sequential number assigned to this address space
+ *
+ * Function emits a timeline message informing about address space creation.
+ * Address space is created with one attribute: number identifying this
+ * address space.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(as, nr) \
+ __TRACE_IF_ENABLED(tl_summary_new_as, as, nr)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU - lifelink address space object to GPU
+ * @as: name of the address space object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that address space object
+ * shall be deleted along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(as, gpu) \
+ __TRACE_IF_ENABLED(tl_summary_lifelink_as_gpu, as, gpu)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_CTX - create context object in timeline
+ * @context: name of the context object
+ * @nr: context number
+ * @tgid: thread Group Id
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number (its attribute) that can be used
+ * to link the kbase context with the userspace context.
+ */
+#define KBASE_TLSTREAM_TL_NEW_CTX(context, nr, tgid) \
+ __TRACE_IF_ENABLED(tl_new_ctx, context, nr, tgid)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_ATOM - create atom object in timeline
+ * @atom: name of the atom object
+ * @nr: sequential number assigned to this atom
+ *
+ * Function emits a timeline message informing about atom creation. The atom
+ * is created with an atom number (its attribute) that links it with the
+ * actual work bucket id understood by the hardware.
+ */
+#define KBASE_TLSTREAM_TL_NEW_ATOM(atom, nr) \
+ __TRACE_IF_ENABLED(tl_new_atom, atom, nr)
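+
+/*
+ * Illustrative call site (not part of this header): atom creation in the job
+ * dispatcher would typically emit
+ *
+ *	KBASE_TLSTREAM_TL_NEW_ATOM(katom, kbase_jd_atom_id(kctx, katom));
+ *	KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx);
+ *
+ * The wrapper expands to a call to __kbase_tlstream_tl_new_atom() only when
+ * the stream has been enabled, so a disabled tracepoint costs one atomic
+ * read. kbase_jd_atom_id() is referenced here purely for illustration.
+ */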
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_CTX - destroy context object in timeline
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that context object ceased to
+ * exist.
+ */
+#define KBASE_TLSTREAM_TL_DEL_CTX(context) \
+ __TRACE_IF_ENABLED(tl_del_ctx, context)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_ATOM - destroy atom object in timeline
+ * @atom: name of the atom object
+ *
+ * Function emits a timeline message informing that atom object ceased to
+ * exist.
+ */
+#define KBASE_TLSTREAM_TL_DEL_ATOM(atom) \
+ __TRACE_IF_ENABLED(tl_del_atom, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_CTX_LPU - retain context by LPU
+ * @context: name of the context object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_CTX_LPU(context, lpu) \
+ __TRACE_IF_ENABLED(tl_ret_ctx_lpu, context, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_CTX - retain atom by context
+ * @atom: name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by context and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_CTX(atom, context) \
+ __TRACE_IF_ENABLED(tl_ret_atom_ctx, atom, context)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_LPU - retain atom by LPU
+ * @atom: name of the atom object
+ * @lpu: name of the Logical Processing Unit object
+ * @attrib_match_list: list containing match operator attributes
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_LPU(atom, lpu, attrib_match_list) \
+ __TRACE_IF_ENABLED(tl_ret_atom_lpu, atom, lpu, attrib_match_list)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_CTX_LPU - release context by LPU
+ * @context: name of the context object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being released
+ * by LPU object.
+ */
+#define KBASE_TLSTREAM_TL_NRET_CTX_LPU(context, lpu) \
+ __TRACE_IF_ENABLED(tl_nret_ctx_lpu, context, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_CTX - release atom by context
+ * @atom: name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by context.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX(atom, context) \
+ __TRACE_IF_ENABLED(tl_nret_atom_ctx, atom, context)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_LPU - release atom by LPU
+ * @atom: name of the atom object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by LPU.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU(atom, lpu) \
+ __TRACE_IF_ENABLED(tl_nret_atom_lpu, atom, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_AS_CTX - retain address space by context
+ * @as: name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that address space object
+ * is being held by the context object.
+ */
+#define KBASE_TLSTREAM_TL_RET_AS_CTX(as, ctx) \
+ __TRACE_IF_ENABLED(tl_ret_as_ctx, as, ctx)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_AS_CTX - release address space by context
+ * @as: name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that the address space object
+ * is being released by the context object.
+ */
+#define KBASE_TLSTREAM_TL_NRET_AS_CTX(as, ctx) \
+ __TRACE_IF_ENABLED(tl_nret_as_ctx, as, ctx)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_AS - retain atom by address space
+ * @atom: name of the atom object
+ * @as: name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by address space and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_AS(atom, as) \
+ __TRACE_IF_ENABLED(tl_ret_atom_as, atom, as)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_AS - release atom by address space
+ * @atom: name of the atom object
+ * @as: name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by address space.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_AS(atom, as) \
+ __TRACE_IF_ENABLED(tl_nret_atom_as, atom, as)
+
+/**
+ * KBASE_TLSTREAM_TL_DEP_ATOM_ATOM - parent atom depends on child atom
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depends on child atom
+ *
+ * Function emits a timeline message informing that the parent atom waits for
+ * the child atom object to complete before starting its own execution.
+ */
+#define KBASE_TLSTREAM_TL_DEP_ATOM_ATOM(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_dep_atom_atom, atom1, atom2)
+
+/**
+ * KBASE_TLSTREAM_TL_NDEP_ATOM_ATOM - dependency between atoms resolved
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depended on child atom
+ *
+ * Function emits a timeline message informing that parent atom execution
+ * dependency on child atom has been resolved.
+ */
+#define KBASE_TLSTREAM_TL_NDEP_ATOM_ATOM(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_ndep_atom_atom, atom1, atom2)
+
+/**
+ * KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM - information about already resolved dependency between atoms
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depended on child atom
+ *
+ * Function emits a timeline message informing that parent atom execution
+ * dependency on child atom has been resolved.
+ */
+#define KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_rdep_atom_atom, atom1, atom2)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes
+ * @atom: name of the atom object
+ * @jd: job descriptor address
+ * @affinity: job affinity
+ * @config: job config
+ *
+ * Function emits a timeline message containing atom attributes.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(atom, jd, affinity, config) \
+ __TRACE_IF_ENABLED(tl_attrib_atom_config, atom, jd, affinity, config)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority
+ * @atom: name of the atom object
+ * @prio: atom priority
+ *
+ * Function emits a timeline message containing atom priority.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(atom, prio) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority, atom, prio)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state
+ * @atom: name of the atom object
+ * @state: atom state
+ *
+ * Function emits a timeline message containing atom state.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, state) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_state, atom, state)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE - atom caused priority change
+ * @atom: name of the atom object
+ *
+ * Function emits a timeline message signalling an atom priority change.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE(atom) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority_change, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit happened on atom
+ * @atom: atom identifier
+ * @edit_addr: address edited by jit
+ * @new_addr: address placed into the edited location
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(atom, edit_addr, new_addr) \
+ __TRACE_IF_ENABLED_JD(tl_attrib_atom_jit, atom, edit_addr, new_addr)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes
+ * @as: assigned address space
+ * @transtab: configuration of the TRANSTAB register
+ * @memattr: configuration of the MEMATTR register
+ * @transcfg: configuration of the TRANSCFG register (or zero if not present)
+ *
+ * Function emits a timeline message containing address space attributes.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as, transtab, memattr, transcfg) \
+ __TRACE_IF_ENABLED(tl_attrib_as_config, as, transtab, memattr, transcfg)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX - atom soft-stop event
+ * @atom: atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softstop_ex, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP - LPU soft-stop event
+ * @lpu: name of the LPU object
+ */
+#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(lpu) \
+ __TRACE_IF_ENABLED(tl_event_lpu_softstop, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE - atom soft-stop issued
+ * @atom: atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softstop_issue, atom)
+
+/**
+ * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - The GPU is being soft reset
+ * @gpu: name of the GPU object
+ *
+ * This imperative tracepoint is specific to job dumping.
+ * Function emits a timeline message indicating GPU soft reset.
+ */
+#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET(gpu) \
+ __TRACE_IF_ENABLED(jd_gpu_soft_reset, gpu)
+
+
+/**
+ * KBASE_TLSTREAM_AUX_PM_STATE - timeline message: power management state
+ * @core_type: core type (shader, tiler, l2 cache, l3 cache)
+ * @state: 64-bit bitmask reporting the power state of the cores (1 = ON, 0 = OFF)
+ */
+#define KBASE_TLSTREAM_AUX_PM_STATE(core_type, state) \
+ __TRACE_IF_ENABLED(aux_pm_state, core_type, state)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGEFAULT - timeline message: MMU page fault event
+ * resulting in new pages being mapped
+ * @ctx_nr: kernel context number
+ * @page_count_change: number of pages to be added
+ */
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(ctx_nr, page_count_change) \
+ __TRACE_IF_ENABLED(aux_pagefault, ctx_nr, page_count_change)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGESALLOC - timeline message: total number of allocated
+ * pages is changed
+ * @ctx_nr: kernel context number
+ * @page_count: number of pages used by the context
+ */
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(ctx_nr, page_count) \
+ __TRACE_IF_ENABLED(aux_pagesalloc, ctx_nr, page_count)
+
+/**
+ * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - timeline message: new target DVFS
+ * frequency
+ * @target_freq: new target frequency
+ */
+#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(target_freq) \
+ __TRACE_IF_ENABLED(aux_devfreq_target, target_freq)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - The GPU has started transitioning
+ * to protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU is starting to
+ * transition to protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(gpu) \
+ __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_start, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - The GPU has finished transitioning
+ * to protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU has finished
+ * transitioning to protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(gpu) \
+ __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_end, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - The GPU has started transitioning
+ * to non-protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU is starting to
+ * transition to non-protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(gpu) \
+ __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_start, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - The GPU has finished transitioning
+ * to non-protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU has finished
+ * transitioning to non-protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(gpu) \
+ __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_end, gpu)
+
+#endif /* _KBASE_TLSTREAM_H */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_trace_defs.h b/drivers/gpu/arm_gpu/mali_kbase_trace_defs.h
new file mode 100644
index 000000000000..e2e0544208ce
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_trace_defs.h
@@ -0,0 +1,264 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
+ * ***** DO NOT INCLUDE DIRECTLY *****
+ * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * The purpose of this header file is just to contain a list of trace code identifiers
+ *
+ * Each identifier is wrapped in a macro, so that its string form and enum form can be created
+ *
+ * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block.
+ *
+ * This allows automatic creation of an enum and a corresponding array of strings
+ *
+ * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE.
+ * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ *
+ * e.g.:
+ * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
+ * typedef enum
+ * {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X )
+ * #include "mali_kbase_trace_defs.h"
+ * #undef KBASE_TRACE_CODE_MAKE_CODE
+ * } kbase_trace_code;
+ *
+ * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THE ABOVE
+ *
+ *
+ * The use of the macro here is:
+ * - KBASE_TRACE_CODE_MAKE_CODE( X )
+ *
+ * Which produces:
+ * - For an enum, KBASE_TRACE_CODE_X
+ * - For a string, "X"
+ *
+ *
+ * For example:
+ * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to:
+ * - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum
+ * - "JM_JOB_COMPLETE" for the string
+ * - To use it to trace an event, do:
+ * - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val );
+ */
+
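+/*
+ * For the string form promised above, an includer can (illustratively)
+ * stringify each code instead:
+ *
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) # X
+ * static const char * const kbasep_trace_code_strings[] = {
+ * #include "mali_kbase_trace_defs.h"
+ * };
+ * #undef KBASE_TRACE_CODE_MAKE_CODE
+ *
+ * (kbasep_trace_code_strings is a hypothetical name used only as an example.)
+ */
+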
+#if 0 /* Dummy section to avoid breaking formatting */
+int dummy_array[] = {
+#endif
+
+/*
+ * Core events
+ */
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+ /* info_val == bits cleared */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
+ /* GPU addr==dump address */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
+/*
+ * Job Slot management events
+ */
+ /* info_val==irq rawstat at start */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+ /* info_val==jobs processed */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
+/* In the following:
+ *
+ * - ctx is set if a corresponding job is found (NULL otherwise, e.g. some soft-stop cases)
+ * - uatom==kernel-side mapped uatom address (for correlation with user-side)
+ */
+ /* info_val==exit code; gpu_addr==chain gpuaddr */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+ /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
+ /* gpu_addr is as follows:
+ * - If JS_STATUS active after soft-stop, val==gpu addr written to
+ * JS_HEAD on submit
+ * - otherwise gpu_addr==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+ /* gpu_addr==JS_TAIL read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
+/* gpu_addr is as follows:
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
+ /* info_val == nr jobs submitted */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+ /* gpu_addr==JS_HEAD_NEXT last written */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
+/*
+ * Job dispatch events
+ */
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==0, info_val==0, uatom==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
+/*
+ * Scheduler Core events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+ /* gpu_addr==last value written/would be written to JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
+ /* kctx is the one being evicted, info_val == kctx to put in */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_FAST_START_EVICTS_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
+ /* info_val == the ctx attribute now on ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+ /* info_val == the ctx attribute now on runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
+ /* info_val == the ctx attribute now off ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+ /* info_val == the ctx attribute now off runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
+/*
+ * Scheduler Policy events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
+ /* info_val == whether it was evicted */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
+ /* gpu_addr==JS_HEAD to write if the job were run */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
+/*
+ * Power Management Events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER),
+ /* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
+ /* info_val == policy number, or -1 for "Already changing" */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
+/* Unused code just to make it easier to not have a comma at the end.
+ * All other codes MUST come before this */
+ KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
+
+#if 0 /* Dummy section to avoid breaking formatting */
+};
+#endif
+
+/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.c b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.c
new file mode 100644
index 000000000000..5830e87f0818
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.c
@@ -0,0 +1,236 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+#define CREATE_TRACE_POINTS
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+#include "mali_timeline.h"
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active);
+
+struct kbase_trace_timeline_desc {
+ char *enum_str;
+ char *desc;
+ char *format;
+ char *format_desc;
+};
+
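+/* Build the descriptor table by expanding the X-macro list in
+ * mali_kbase_trace_timeline_defs.h: each KBASE_TIMELINE_TRACE_CODE entry
+ * becomes one { enum name, description, format, format description } row. */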
+static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = {
+ #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc }
+ #include "mali_kbase_trace_timeline_defs.h"
+ #undef KBASE_TIMELINE_TRACE_CODE
+};
+
+#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table)
+
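+/* seq_file iterator over the descriptor table: *pos indexes directly into
+ * kbase_trace_timeline_desc_table, and each entry is printed as one
+ * '#'-separated line of the debugfs file. */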
+static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= KBASE_NR_TRACE_CODES)
+ return NULL;
+
+ return &kbase_trace_timeline_desc_table[*pos];
+}
+
+static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+ (*pos)++;
+
+ if (*pos == KBASE_NR_TRACE_CODES)
+ return NULL;
+
+ return &kbase_trace_timeline_desc_table[*pos];
+}
+
+static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data)
+{
+ struct kbase_trace_timeline_desc *trace_desc = data;
+
+ seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc);
+ return 0;
+}
+
+
+static const struct seq_operations kbasep_trace_timeline_seq_ops = {
+ .start = kbasep_trace_timeline_seq_start,
+ .next = kbasep_trace_timeline_seq_next,
+ .stop = kbasep_trace_timeline_seq_stop,
+ .show = kbasep_trace_timeline_seq_show,
+};
+
+static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &kbasep_trace_timeline_seq_ops);
+}
+
+static const struct file_operations kbasep_trace_timeline_debugfs_fops = {
+ .open = kbasep_trace_timeline_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("mali_timeline_defs",
+ S_IRUGO, kbdev->mali_debugfs_directory, NULL,
+ &kbasep_trace_timeline_debugfs_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
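+ /* If the slot already has atoms submitted, the new atom lands in JS_NEXT;
+ * otherwise it goes straight to JS_HEAD and is traced as started. */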
+ if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
+ KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
+ } else {
+ base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
+
+ KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1);
+ KBASE_TIMELINE_JOB_START(kctx, js, atom_number);
+ }
+ ++kbdev->timeline.slot_atoms_submitted[js];
+
+ KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
+}
+
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
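+ /* An atom evicted from JS_NEXT only clears the NEXT marker; an atom that
+ * finished in JS_HEAD also ends its job trace and, if another atom is
+ * still submitted, traces that atom moving from JS_NEXT to JS_HEAD. */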
+ if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
+ KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
+ } else {
+ /* Job finished in JS_HEAD */
+ base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
+
+ KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
+ KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);
+
+ /* see if we need to trace the job in JS_NEXT moving to JS_HEAD */
+ if (kbase_backend_nr_atoms_submitted(kbdev, js)) {
+ struct kbase_jd_atom *next_katom;
+ struct kbase_context *next_kctx;
+
+ /* Peek the next atom - note that the atom in JS_HEAD will already
+ * have been dequeued */
+ next_katom = kbase_backend_inspect_head(kbdev, js);
+ WARN_ON(!next_katom);
+ next_kctx = next_katom->kctx;
+ KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0);
+ KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1);
+ KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom));
+ }
+ }
+
+ --kbdev->timeline.slot_atoms_submitted[js];
+
+ KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
+}
+
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
+{
+ int uid = 0;
+ int old_uid;
+
+ /* If a producer already exists for the event, try to use their UID (multiple-producers) */
+ uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]);
+ old_uid = uid;
+
+ /* Get a new non-zero UID if we don't have one yet */
+ while (!uid)
+ uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter);
+
+ /* Try to use this UID */
+ if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
+ /* If it changed, raced with another producer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid);
+}
+
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+ int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
+
+ if (uid != 0) {
+ if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
+ /* If it changed, raced with another consumer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
+ }
+}
+
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+ int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
+
+ if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
+ /* If it changed, raced with another consumer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
+}
+
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Simply log the start of the transition */
+ kbdev->timeline.l2_transitioning = true;
+ KBASE_TIMELINE_POWERING_L2(kbdev);
+}
+
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Simply log the end of the transition */
+ if (kbdev->timeline.l2_transitioning) {
+ kbdev->timeline.l2_transitioning = false;
+ KBASE_TIMELINE_POWERED_L2(kbdev);
+ }
+}
+
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.h b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.h
new file mode 100644
index 000000000000..619072f3215c
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline.h
@@ -0,0 +1,363 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#if !defined(_KBASE_TRACE_TIMELINE_H)
+#define _KBASE_TRACE_TIMELINE_H
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+
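+/* Generate one enum value per entry in mali_kbase_trace_timeline_defs.h.
+ * The same list is expanded again in mali_kbase_trace_timeline.c to build
+ * the matching descriptor table exposed through debugfs. */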
+enum kbase_trace_timeline_code {
+ #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val
+ #include "mali_kbase_trace_timeline_defs.h"
+ #undef KBASE_TIMELINE_TRACE_CODE
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Initialize Timeline DebugFS entries */
+void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbasep_trace_timeline_debugfs_init CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE
+ * functions.
+ * Output is timestamped by either sched_clock() (default), local_clock(), or
+ * cpu_clock(), depending on /sys/kernel/debug/tracing/trace_clock */
+#include "mali_timeline.h"
+
+/* Trace number of atoms in flight for kctx (atoms either not completed, or in
+ * process of being returned to user) */
+#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \
+ (int)kctx->timeline.owner_tgid, \
+ count); \
+ } while (0)
+
+/* Trace atom_id being Ready to Run */
+#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \
+ CTX_FLOW_ATOM_READY, \
+ (int)kctx->timeline.owner_tgid, \
+ atom_id); \
+ } while (0)
+
+/* Trace number of atoms submitted to job slot js
+ *
+ * NOTE: This uses a different tracepoint from the head/next/soft-stop
+ * actions, so that those actions can be filtered out separately from this
+ * one.
+ *
+ * Keeping it separate is useful because this count alone lets us calculate
+ * overall utilization easily and accurately. */
+#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_ACTIVE, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+
+/* Trace atoms present in JS_NEXT */
+#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_NEXT, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+/* Trace atoms present in JS_HEAD */
+#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_HEAD, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+/* Trace that a soft stop/evict from next is being attempted on a slot */
+#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_STOPPING, \
+ (kctx) ? (int)kctx->timeline.owner_tgid : 0, \
+ js, count); \
+ } while (0)
+
+
+
+/* Trace state of overall GPU power */
+#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_ACTIVE, active); \
+ } while (0)
+
+/* Trace state of tiler power */
+#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_TILER_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace number of shaders currently powered */
+#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_SHADER_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace state of L2 power */
+#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_L2_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace state of L2 cache */
+#define KBASE_TIMELINE_POWERING_L2(kbdev) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_GPU_POWER_L2_POWERING, \
+ 1); \
+ } while (0)
+
+#define KBASE_TIMELINE_POWERED_L2(kbdev) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_GPU_POWER_L2_ACTIVE, \
+ 1); \
+ } while (0)
+
+/* Trace kbase_pm_send_event message send */
+#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_PM_SEND_EVENT, \
+ event_type, pm_event_id); \
+ } while (0)
+
+/* Trace kbase_pm_worker message receive */
+#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_PM_HANDLE_EVENT, \
+ event_type, pm_event_id); \
+ } while (0)
+
+
+/* Trace atom_id starting in JS_HEAD */
+#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
+ HW_START_GPU_JOB_CHAIN_SW_APPROX, \
+ (int)kctx->timeline.owner_tgid, \
+ js, _consumerof_atom_number); \
+ } while (0)
+
+/* Trace atom_id stopping on JS_HEAD */
+#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
+ HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \
+ (int)kctx->timeline.owner_tgid, \
+ js, _producerof_atom_number_completed); \
+ } while (0)
+
+/** Trace beginning/end of a call to kbase_pm_check_transitions_nolock from a
+ * certain caller */
+#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \
+ trace_code, 1); \
+ } while (0)
+
+/* Trace number of contexts active */
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \
+ count); \
+ } while (0)
+
+/* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */
+
+/**
+ * Trace that an atom is starting on a job slot
+ *
+ * The caller must be holding hwaccess_lock
+ */
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js);
+
+/**
+ * Trace that an atom is done on a job slot
+ *
+ * 'Done' in this sense can occur either because:
+ * - the atom in JS_HEAD finished
+ * - the atom in JS_NEXT was evicted
+ *
+ * Whether the atom finished or was evicted is passed in @a done_code
+ *
+ * It is assumed that the atom has already been removed from the submit slot,
+ * with either:
+ * - kbasep_jm_dequeue_submit_slot()
+ * - kbasep_jm_dequeue_tail_submit_slot()
+ *
+ * The caller must be holding hwaccess_lock
+ */
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code);
+
+
+/** Trace a pm event starting */
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev,
+ enum kbase_timeline_pm_event event_sent);
+
+/** Check whether a pm event was present, and if so trace finishing it */
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
+
+/** Trace a pm event finishing */
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
+
+/** Trace L2 power-up start */
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev);
+
+/** Trace L2 power-up done */
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev);
+
+#else
+
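+/* CONFIG_MALI_TRACE_TIMELINE disabled: the trace macros compile to no-ops
+ * and the slot/PM hooks become inline stubs, so callers need no conditional
+ * compilation of their own. */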
+#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_GPU_POWER(kbdev, active) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_L2(kbdev, active) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWERING_L2(kbdev) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWERED_L2(kbdev) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP()
+
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP()
+
+static inline void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+}
+
+static inline void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+}
+
+static inline void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
+{
+}
+
+static inline void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+}
+
+static inline void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+}
+
+static inline void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
+{
+}
+
+static inline void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
+{
+}
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+#endif /* _KBASE_TRACE_TIMELINE_H */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_trace_timeline_defs.h b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline_defs.h
new file mode 100644
index 000000000000..156a95a67f4a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_trace_timeline_defs.h
@@ -0,0 +1,140 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
+ * ***** DO NOT INCLUDE DIRECTLY *****
+ * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * Conventions on Event Names:
+ *
+ * - The prefix determines something about how the timeline should be
+ * displayed, and is split up into various parts, separated by underscores:
+ * - 'SW' and 'HW' as the first part will be used to determine whether a
+ * timeline is to do with Software or Hardware - effectively, separate
+ * 'channels' for Software and Hardware
+ * - 'START', 'STOP', 'ENTER', 'LEAVE' can be used in the second part, and
+ * signify related pairs of events - these are optional.
+ * - 'FLOW' indicates a generic event, which can use dependencies
+ * - This gives events such as:
+ * - 'SW_ENTER_FOO'
+ * - 'SW_LEAVE_FOO'
+ * - 'SW_FLOW_BAR_1'
+ * - 'SW_FLOW_BAR_2'
+ * - 'HW_START_BAZ'
+ * - 'HW_STOP_BAZ'
+ * - And an unadorned HW event:
+ * - 'HW_BAZ_FROZBOZ'
+ */
+
+/*
+ * Conventions on parameter names:
+ * - anything with 'instance' in the name will have a separate timeline based
+ *   on that instance.
+ * - underscore-prefixed parameters will be hidden by default on timelines
+ *
+ * Hence:
+ * - Different job slots have their own 'instance', based on the instance value
+ * - Per-context info (e.g. atoms on a context) have their own 'instance'
+ * (i.e. each context should be on a different timeline)
+ *
+ * Note that globally-shared resources can be tagged with a tgid, but we don't
+ * want an instance per context:
+ * - There's no point having separate Job Slot timelines for each context;
+ *   that would be confusing - there are really only 3 job slots!
+ * - There's no point having separate Shader-powered timelines for each
+ *   context; that would be confusing - all shader cores (whether there are
+ *   4, 8, etc.) are shared in the system.
+ */
+
+ /*
+ * CTX events
+ */
+ /* Separate timelines for each context 'instance' */
+ KBASE_TIMELINE_TRACE_CODE(CTX_SET_NR_ATOMS_IN_FLIGHT, "CTX: Atoms in flight", "%d,%d", "_instance_tgid,_value_number_of_atoms"),
+ KBASE_TIMELINE_TRACE_CODE(CTX_FLOW_ATOM_READY, "CTX: Atoms Ready to Run", "%d,%d,%d", "_instance_tgid,_consumerof_atom_number,_producerof_atom_number_ready"),
+
+ /*
+ * SW Events
+ */
+ /* Separate timelines for each slot 'instance' */
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_ACTIVE, "SW: GPU slot active", "%d,%d,%d", "_tgid,_instance_slot,_value_number_of_atoms"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_NEXT, "SW: GPU atom in NEXT", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_next"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_HEAD, "SW: GPU atom in HEAD", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_head"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_STOPPING, "SW: Try Soft-Stop on GPU slot", "%d,%d,%d", "_tgid,_instance_slot,_value_is_slot_stopping"),
+ /* Shader and overall power is shared - can't have separate instances of
+ * it, just tagging with the context */
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_ACTIVE, "SW: GPU power active", "%d,%d", "_tgid,_value_is_power_active"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_TILER_ACTIVE, "SW: GPU tiler powered", "%d,%d", "_tgid,_value_number_of_tilers"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_SHADER_ACTIVE, "SW: GPU shaders powered", "%d,%d", "_tgid,_value_number_of_shaders"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powered", "%d,%d", "_tgid,_value_number_of_l2"),
+
+ /* SW Power event messaging. _event_type is one from the kbase_pm_event enum */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_SEND_EVENT, "SW: PM Send Event", "%d,%d,%d", "_tgid,_event_type,_writerof_pm_event_id"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_HANDLE_EVENT, "SW: PM Handle Event", "%d,%d,%d", "_tgid,_event_type,_finalconsumerof_pm_event_id"),
+ /* SW L2 power events */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING, "SW: GPU L2 powering", "%d,%d", "_tgid,_writerof_l2_transitioning"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powering done", "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"),
+
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE, "SW: Context Active", "%d,%d", "_tgid,_value_active"),
+
+ /*
+ * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock()
+ */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweroff"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweroff"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweron"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweron"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_writerof_pm_checktrans_gpu_interrupt"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_gpu_interrupt"),
+
+ /*
+ * Significant Indirect callers of kbase_pm_check_transitions_nolock()
+ */
+ /* kbase_pm_request_cores */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader_tiler"),
+ /* kbase_pm_release_cores */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_shader_poweroff_callback"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_shader_poweroff_callback"),
+ /*
+ * END: SW Functions that call kbase_pm_check_transitions_nolock()
+ */
+
+ /*
+ * HW Events
+ */
+ KBASE_TIMELINE_TRACE_CODE(HW_MMU_FAULT,
+"HW: MMU Fault", "%d,%d,%d", "_tgid,fault_type,fault_stage,asid"),
+ KBASE_TIMELINE_TRACE_CODE(HW_START_GPU_JOB_CHAIN_SW_APPROX,
+"HW: Job Chain start (SW approximated)", "%d,%d,%d",
+"_tgid,job_slot,_consumerof_atom_number_ready"),
+ KBASE_TIMELINE_TRACE_CODE(HW_STOP_GPU_JOB_CHAIN_SW_APPROX,
+"HW: Job Chain stop (SW approximated)", "%d,%d,%d",
+"_tgid,job_slot,_producerof_atom_number_completed")
diff --git a/drivers/gpu/arm_gpu/mali_kbase_uku.h b/drivers/gpu/arm_gpu/mali_kbase_uku.h
new file mode 100644
index 000000000000..2a69da7394ba
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_uku.h
@@ -0,0 +1,532 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_UKU_H_
+#define _KBASE_UKU_H_
+
+#include "mali_uk.h"
+#include "mali_base_kernel.h"
+
+/* This file needs to support being included from kernel and userside (which use different defines) */
+#if defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON
+#define SUPPORT_MALI_ERROR_INJECT
+#endif /* defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON */
+#if defined(CONFIG_MALI_NO_MALI)
+#define SUPPORT_MALI_NO_MALI
+#elif defined(MALI_NO_MALI)
+#if MALI_NO_MALI
+#define SUPPORT_MALI_NO_MALI
+#endif
+#endif
+
+#if defined(SUPPORT_MALI_NO_MALI) || defined(SUPPORT_MALI_ERROR_INJECT)
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+#include "mali_kbase_gpuprops_types.h"
+
+/*
+ * 10.1:
+ * - Do mmap in kernel for SAME_VA memory allocations rather then
+ * calling back into the kernel as a 2nd stage of the allocation request.
+ *
+ * 10.2:
+ * - Add KBASE_FUNC_MEM_JIT_INIT which allows clients to request a custom VA
+ * region for use with JIT (ignored on 32-bit platforms)
+ *
+ * 10.3:
+ * - base_jd_core_req typedef-ed to u32 (instead of to u16)
+ * - two flags added: BASE_JD_REQ_SKIP_CACHE_STAT / _END
+ *
+ * 10.4:
+ * - Removed KBASE_FUNC_EXT_BUFFER_LOCK used only in internal tests
+ *
+ * 10.5:
+ * - Reverted to performing mmap in user space so that tools like valgrind work.
+ *
+ * 10.6:
+ * - Add flags input variable to KBASE_FUNC_TLSTREAM_ACQUIRE
+ */
+#define BASE_UK_VERSION_MAJOR 10
+#define BASE_UK_VERSION_MINOR 6
+
+#define LINUX_UK_BASE_MAGIC 0x80
+
+struct kbase_uk_mem_alloc {
+ union uk_header header;
+ /* IN */
+ u64 va_pages;
+ u64 commit_pages;
+ u64 extent;
+ /* IN/OUT */
+ u64 flags;
+ /* OUT */
+ u64 gpu_va;
+ u16 va_alignment;
+ u8 padding[6];
+};
+
+struct kbase_uk_mem_free {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ /* OUT */
+};
+
+struct kbase_uk_mem_alias {
+ union uk_header header;
+ /* IN/OUT */
+ u64 flags;
+ /* IN */
+ u64 stride;
+ u64 nents;
+ u64 ai;
+ /* OUT */
+ u64 gpu_va;
+ u64 va_pages;
+};
+
+struct kbase_uk_mem_import {
+ union uk_header header;
+ /* IN */
+ u64 phandle;
+ u32 type;
+ u32 padding;
+ /* IN/OUT */
+ u64 flags;
+ /* OUT */
+ u64 gpu_va;
+ u64 va_pages;
+};
+
+struct kbase_uk_mem_flags_change {
+ union uk_header header;
+ /* IN */
+ u64 gpu_va;
+ u64 flags;
+ u64 mask;
+};
+
+struct kbase_uk_job_submit {
+ union uk_header header;
+ /* IN */
+ u64 addr;
+ u32 nr_atoms;
+ u32 stride; /* bytes between atoms, i.e. sizeof(base_jd_atom_v2) */
+ /* OUT */
+};
+
+struct kbase_uk_post_term {
+ union uk_header header;
+};
+
+struct kbase_uk_sync_now {
+ union uk_header header;
+
+ /* IN */
+ struct base_syncset sset;
+
+ /* OUT */
+};
+
+struct kbase_uk_hwcnt_setup {
+ union uk_header header;
+
+ /* IN */
+ u64 dump_buffer;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 unused_1; /* keep for backwards compatibility */
+ u32 mmu_l2_bm;
+ u32 padding;
+ /* OUT */
+};
+
+/**
+ * struct kbase_uk_hwcnt_reader_setup - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @buffer_count: requested number of dumping buffers
+ * @jm_bm: counters selection bitmask (JM)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ * @fd: dumping notification file descriptor
+ *
+ * This structure sets up HWC dumper/reader for this context.
+ * Multiple instances can be created for a single context.
+ */
+struct kbase_uk_hwcnt_reader_setup {
+ union uk_header header;
+
+ /* IN */
+ u32 buffer_count;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 mmu_l2_bm;
+
+ /* OUT */
+ s32 fd;
+};
+
+struct kbase_uk_hwcnt_dump {
+ union uk_header header;
+};
+
+struct kbase_uk_hwcnt_clear {
+ union uk_header header;
+};
+
+struct kbase_uk_fence_validate {
+ union uk_header header;
+ /* IN */
+ s32 fd;
+ u32 padding;
+ /* OUT */
+};
+
+struct kbase_uk_stream_create {
+ union uk_header header;
+ /* IN */
+ char name[32];
+ /* OUT */
+ s32 fd;
+ u32 padding;
+};
+
+struct kbase_uk_gpuprops {
+ union uk_header header;
+
+ /* IN */
+ struct mali_base_gpu_props props;
+ /* OUT */
+};
+
+struct kbase_uk_mem_query {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+#define KBASE_MEM_QUERY_COMMIT_SIZE 1
+#define KBASE_MEM_QUERY_VA_SIZE 2
+#define KBASE_MEM_QUERY_FLAGS 3
+ u64 query;
+ /* OUT */
+ u64 value;
+};
+
+struct kbase_uk_mem_commit {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ u64 pages;
+ /* OUT */
+ u32 result_subcode;
+ u32 padding;
+};
+
+struct kbase_uk_find_cpu_offset {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ u64 cpu_addr;
+ u64 size;
+ /* OUT */
+ u64 offset;
+};
+
+#define KBASE_GET_VERSION_BUFFER_SIZE 64
+struct kbase_uk_get_ddk_version {
+ union uk_header header;
+ /* OUT */
+ char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
+ u32 version_string_size;
+ u32 padding;
+};
+
+struct kbase_uk_disjoint_query {
+ union uk_header header;
+ /* OUT */
+ u32 counter;
+ u32 padding;
+};
+
+struct kbase_uk_set_flags {
+ union uk_header header;
+ /* IN */
+ u32 create_flags;
+ u32 padding;
+};
+
+#if MALI_UNIT_TEST
+#define TEST_ADDR_COUNT 4
+#define KBASE_TEST_BUFFER_SIZE 128
+struct kbase_exported_test_data {
+ u64 test_addr[TEST_ADDR_COUNT]; /**< memory address */
+ u32 test_addr_pages[TEST_ADDR_COUNT]; /**< memory size in pages */
+ u64 kctx; /**< base context created by process */
+ u64 mm; /**< pointer to process address space */
+ u8 buffer1[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
+ u8 buffer2[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
+};
+
+struct kbase_uk_set_test_data {
+ union uk_header header;
+ /* IN */
+ struct kbase_exported_test_data test_data;
+};
+
+#endif /* MALI_UNIT_TEST */
+
+#ifdef SUPPORT_MALI_ERROR_INJECT
+struct kbase_uk_error_params {
+ union uk_header header;
+ /* IN */
+ struct kbase_error_params params;
+};
+#endif /* SUPPORT_MALI_ERROR_INJECT */
+
+#ifdef SUPPORT_MALI_NO_MALI
+struct kbase_uk_model_control_params {
+ union uk_header header;
+ /* IN */
+ struct kbase_model_control_params params;
+};
+#endif /* SUPPORT_MALI_NO_MALI */
+
+struct kbase_uk_profiling_controls {
+ union uk_header header;
+ u32 profiling_controls[FBDUMP_CONTROL_MAX];
+};
+
+struct kbase_uk_debugfs_mem_profile_add {
+ union uk_header header;
+ u32 len;
+ u32 padding;
+ u64 buf;
+};
+
+struct kbase_uk_context_id {
+ union uk_header header;
+ /* OUT */
+ int id;
+};
+
+/**
+ * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @flags: timeline stream flags
+ * @fd: timeline stream file descriptor
+ *
+ * This structure is used when performing a call to acquire a kernel-side
+ * timeline stream file descriptor.
+ */
+struct kbase_uk_tlstream_acquire {
+ union uk_header header;
+ /* IN */
+ u32 flags;
+ /* OUT */
+ s32 fd;
+};
+
+/**
+ * struct kbase_uk_tlstream_acquire_v10_4 - User/Kernel space data exchange
+ * structure
+ * @header: UK structure header
+ * @fd: timeline stream file descriptor
+ *
+ * This structure is used when performing a call to acquire a kernel-side
+ * timeline stream file descriptor.
+ */
+struct kbase_uk_tlstream_acquire_v10_4 {
+ union uk_header header;
+ /* IN */
+ /* OUT */
+ s32 fd;
+};
+
+/**
+ * struct kbase_uk_tlstream_flush - User/Kernel space data exchange structure
+ * @header: UK structure header
+ *
+ * This structure is used when performing a call to flush the kernel-side
+ * timeline streams.
+ */
+struct kbase_uk_tlstream_flush {
+ union uk_header header;
+ /* IN */
+ /* OUT */
+};
+
+#if MALI_UNIT_TEST
+/**
+ * struct kbase_uk_tlstream_test - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay between tracepoints from one writer in milliseconds
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg: if non-zero aux messages will be included
+ *
+ * This structure is used when performing a call to start the timeline stream
+ * test embedded in the kernel.
+ */
+struct kbase_uk_tlstream_test {
+ union uk_header header;
+ /* IN */
+ u32 tpw_count;
+ u32 msg_delay;
+ u32 msg_count;
+ u32 aux_msg;
+ /* OUT */
+};
+
+/**
+ * struct kbase_uk_tlstream_stats - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ *
+ * This structure is used when performing a call to obtain timeline stream
+ * statistics.
+ */
+struct kbase_uk_tlstream_stats {
+ union uk_header header; /**< UK structure header. */
+ /* IN */
+ /* OUT */
+ u32 bytes_collected;
+ u32 bytes_generated;
+};
+#endif /* MALI_UNIT_TEST */
+
+/**
+ * struct kbase_uk_prfcnt_values - data for the KBASE_FUNC_SET_PRFCNT_VALUES ioctl
+ * @header: UK structure header
+ * @data: Counter samples for the dummy model
+ * @size: Size of the counter sample data
+ */
+struct kbase_uk_prfcnt_values {
+ union uk_header header;
+ /* IN */
+ u32 *data;
+ u32 size;
+};
+
+/**
+ * struct kbase_uk_soft_event_update - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @evt: the GPU address containing the event
+ * @new_status: the new event status, must be either BASE_JD_SOFT_EVENT_SET or
+ * BASE_JD_SOFT_EVENT_RESET
+ * @flags: reserved for future uses, must be set to 0
+ *
+ * This structure is used to update the status of a software event. If the
+ * event's status is set to BASE_JD_SOFT_EVENT_SET, any job currently waiting
+ * on this event will complete.
+ */
+struct kbase_uk_soft_event_update {
+ union uk_header header;
+ /* IN */
+ u64 evt;
+ u32 new_status;
+ u32 flags;
+};
+
+/**
+ * struct kbase_uk_mem_jit_init - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @va_pages: Number of virtual pages required for JIT
+ *
+ * This structure is used when requesting initialization of JIT.
+ */
+struct kbase_uk_mem_jit_init {
+ union uk_header header;
+ /* IN */
+ u64 va_pages;
+};
+
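+/* Function IDs for the UK ioctl interface: each ID is an offset from
+ * UK_FUNC_ID, and gaps mark calls that have been removed and are left
+ * unused (see the comments below). */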
+enum kbase_uk_function_id {
+ KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0),
+ KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1),
+ KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2),
+ KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3),
+ KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4),
+ KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5),
+ KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6),
+
+ /* UK_FUNC_ID + 7 not in use since BASE_LEGACY_UK6_SUPPORT dropped */
+
+ KBASE_FUNC_SYNC = (UK_FUNC_ID + 8),
+
+ KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9),
+
+ KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10),
+ KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11),
+ KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12),
+
+ KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14),
+
+ KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15),
+
+ KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16),
+ KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18),
+
+ KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19),
+ KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20),
+ KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21),
+
+ /* UK_FUNC_ID + 22 not in use since BASE_LEGACY_UK8_SUPPORT dropped */
+
+ KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23),
+ KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24),
+ KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25),
+ KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26),
+ /* to be used only for testing
+ * purposes, otherwise these controls
+ * are set through gator API */
+
+ KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27),
+ KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28),
+ KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29),
+
+ KBASE_FUNC_GET_CONTEXT_ID = (UK_FUNC_ID + 31),
+
+ KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4 = (UK_FUNC_ID + 32),
+#if MALI_UNIT_TEST
+ KBASE_FUNC_TLSTREAM_TEST = (UK_FUNC_ID + 33),
+ KBASE_FUNC_TLSTREAM_STATS = (UK_FUNC_ID + 34),
+#endif /* MALI_UNIT_TEST */
+ KBASE_FUNC_TLSTREAM_FLUSH = (UK_FUNC_ID + 35),
+
+ KBASE_FUNC_HWCNT_READER_SETUP = (UK_FUNC_ID + 36),
+
+#ifdef SUPPORT_MALI_NO_MALI
+ KBASE_FUNC_SET_PRFCNT_VALUES = (UK_FUNC_ID + 37),
+#endif
+
+ KBASE_FUNC_SOFT_EVENT_UPDATE = (UK_FUNC_ID + 38),
+
+ KBASE_FUNC_MEM_JIT_INIT = (UK_FUNC_ID + 39),
+
+ KBASE_FUNC_TLSTREAM_ACQUIRE = (UK_FUNC_ID + 40),
+
+ KBASE_FUNC_MAX
+};
+
+#endif /* _KBASE_UKU_H_ */
+
diff --git a/drivers/gpu/arm_gpu/mali_kbase_utility.c b/drivers/gpu/arm_gpu/mali_kbase_utility.c
new file mode 100644
index 000000000000..be474ff87401
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_utility.c
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
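+/* Linear scan: walk the list from its head and compare each node pointer
+ * against the candidate entry. */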
+bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry)
+{
+ struct list_head *pos = base->next;
+
+ while (pos != base) {
+ if (pos == entry)
+ return true;
+
+ pos = pos->next;
+ }
+ return false;
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_utility.h b/drivers/gpu/arm_gpu/mali_kbase_utility.h
new file mode 100644
index 000000000000..fd7252dab0de
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_utility.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_UTILITY_H
+#define _KBASE_UTILITY_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+/** Test whether the given list entry is a member of the given list.
+ *
+ * @param base The head of the list to be tested
+ * @param entry The list entry to be tested
+ *
+ * @return true if entry is a member of base
+ * false otherwise
+ */
+bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry);
+
+#endif /* _KBASE_UTILITY_H */
diff --git a/drivers/gpu/arm_gpu/mali_kbase_vinstr.c b/drivers/gpu/arm_gpu/mali_kbase_vinstr.c
new file mode 100644
index 000000000000..165841dd1459
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_vinstr.c
@@ -0,0 +1,2076 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwcnt_reader.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tlstream.h>
+
+/*****************************************************************************/
+
+/* Hwcnt reader API version */
+#define HWCNT_READER_API 1
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/* The time resolution of dumping service. */
+#define DUMPING_RESOLUTION 500000ull /* ns */
+
+/* The maximal supported number of dumping buffers. */
+#define MAX_BUFFER_COUNT 32
+
+/* Size and number of hw counter blocks. */
+#define NR_CNT_BLOCKS_PER_GROUP 8
+#define NR_CNT_PER_BLOCK 64
+#define NR_BYTES_PER_CNT 4
+#define NR_BYTES_PER_HDR 16
+#define PRFCNT_EN_MASK_OFFSET 0x8
+
+/*****************************************************************************/
+
+enum {
+ SHADER_HWCNT_BM,
+ TILER_HWCNT_BM,
+ MMU_L2_HWCNT_BM,
+ JM_HWCNT_BM
+};
+
+enum vinstr_state {
+ VINSTR_IDLE,
+ VINSTR_DUMPING,
+ VINSTR_SUSPENDING,
+ VINSTR_SUSPENDED,
+ VINSTR_RESUMING
+};
+
+/**
+ * struct kbase_vinstr_context - vinstr context per device
+ * @lock: protects the entire vinstr context
+ * @kbdev: pointer to kbase device
+ * @kctx: pointer to kbase context
+ * @vmap: vinstr vmap for mapping hwcnt dump buffer
+ * @gpu_va: GPU hwcnt dump buffer address
+ * @cpu_va: the CPU side mapping of the hwcnt dump buffer
+ * @dump_size: size of the dump buffer in bytes
+ * @bitmap: current set of counters monitored, not always in sync
+ * with hardware
+ * @reprogram: when true, reprogram hwcnt block with the new set of
+ * counters
+ * @state: vinstr state
+ * @state_lock: protects information about vinstr state
+ * @suspend_waitq: notification queue to trigger state re-validation
+ * @suspend_cnt: reference counter of vinstr's suspend state
+ * @suspend_work: worker to execute on entering suspended state
+ * @resume_work: worker to execute on leaving suspended state
+ * @nclients: number of attached clients, pending or otherwise
+ * @waiting_clients: head of list of clients being periodically sampled
+ * @idle_clients: head of list of clients being idle
+ * @suspended_clients: head of list of clients being suspended
+ * @thread: periodic sampling thread
+ * @waitq: notification queue of sampling thread
+ * @request_pending: request for action for sampling thread
+ * @clients_present: when true, we have at least one client
+ * Note: this variable is kept in sync with nclients and is
+ * present to preserve simplicity. Protected by state_lock.
+ */
+struct kbase_vinstr_context {
+ struct mutex lock;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx;
+
+ struct kbase_vmap_struct vmap;
+ u64 gpu_va;
+ void *cpu_va;
+ size_t dump_size;
+ u32 bitmap[4];
+ bool reprogram;
+
+ enum vinstr_state state;
+ struct spinlock state_lock;
+ wait_queue_head_t suspend_waitq;
+ unsigned int suspend_cnt;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ u32 nclients;
+ struct list_head waiting_clients;
+ struct list_head idle_clients;
+ struct list_head suspended_clients;
+
+ struct task_struct *thread;
+ wait_queue_head_t waitq;
+ atomic_t request_pending;
+
+ bool clients_present;
+};
+
+/**
+ * struct kbase_vinstr_client - a vinstr client attached to a vinstr context
+ * @vinstr_ctx: vinstr context client is attached to
+ * @list: node used to attach this client to list in vinstr context
+ * @buffer_count: number of buffers this client is using
+ * @event_mask: events this client reacts to
+ * @dump_size: size of one dump buffer in bytes
+ * @bitmap: bitmap request for JM, TILER, SHADER and MMU counters
+ * @legacy_buffer: userspace hwcnt dump buffer (legacy interface)
+ * @kernel_buffer: kernel hwcnt dump buffer (kernel client interface)
+ * @accum_buffer: temporary accumulation buffer for preserving counters
+ * @dump_time: next time this client shall request an hwcnt dump
+ * @dump_interval: interval between periodic hwcnt dumps
+ * @dump_buffers: kernel hwcnt dump buffers allocated by this client
+ * @dump_buffers_meta: metadata of dump buffers
+ * @meta_idx: index of metadata being accessed by userspace
+ * @read_idx: index of buffer read by userspace
+ * @write_idx: index of buffer being written by dumping service
+ * @waitq: client's notification queue
+ * @pending: when true, client has attached but hwcnt not yet updated
+ */
+struct kbase_vinstr_client {
+ struct kbase_vinstr_context *vinstr_ctx;
+ struct list_head list;
+ unsigned int buffer_count;
+ u32 event_mask;
+ size_t dump_size;
+ u32 bitmap[4];
+ void __user *legacy_buffer;
+ void *kernel_buffer;
+ void *accum_buffer;
+ u64 dump_time;
+ u32 dump_interval;
+ char *dump_buffers;
+ struct kbase_hwcnt_reader_metadata *dump_buffers_meta;
+ atomic_t meta_idx;
+ atomic_t read_idx;
+ atomic_t write_idx;
+ wait_queue_head_t waitq;
+ bool pending;
+};
+
+/**
+ * struct kbasep_vinstr_wake_up_timer - vinstr service thread wake up timer
+ * @hrtimer: high resolution timer
+ * @vinstr_ctx: vinstr context
+ */
+struct kbasep_vinstr_wake_up_timer {
+ struct hrtimer hrtimer;
+ struct kbase_vinstr_context *vinstr_ctx;
+};
+
+/*****************************************************************************/
+
+static int kbasep_vinstr_service_task(void *data);
+
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+ struct file *filp,
+ poll_table *wait);
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long arg);
+static int kbasep_vinstr_hwcnt_reader_mmap(
+ struct file *filp,
+ struct vm_area_struct *vma);
+static int kbasep_vinstr_hwcnt_reader_release(
+ struct inode *inode,
+ struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations vinstr_client_fops = {
+ .poll = kbasep_vinstr_hwcnt_reader_poll,
+ .unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+ .compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+ .mmap = kbasep_vinstr_hwcnt_reader_mmap,
+ .release = kbasep_vinstr_hwcnt_reader_release,
+};
+
+/*****************************************************************************/
+
+static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbase_uk_hwcnt_setup setup;
+ int err;
+
+ setup.dump_buffer = vinstr_ctx->gpu_va;
+ setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
+ setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM];
+ setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
+ setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];
+
+ /* Mark the context as active so the GPU is kept turned on */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread. */
+ kbase_pm_context_active(kbdev);
+
+ /* Schedule the context in */
+ kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &setup);
+ if (err) {
+ /* Release the context. This had its own Power Manager Active
+ * reference */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+
+ /* Also release our Power Manager Active reference */
+ kbase_pm_context_idle(kbdev);
+ }
+
+ return err;
+}
+
+static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ int err;
+
+ err = kbase_instr_hwcnt_disable_internal(kctx);
+ if (err) {
+ dev_warn(kbdev->dev, "Failed to disable HW counters (ctx:%p)",
+ kctx);
+ return;
+ }
+
+ /* Release the context. This had its own Power Manager Active reference. */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+
+ /* Also release our Power Manager Active reference. */
+ kbase_pm_context_idle(kbdev);
+
+ dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);
+}
+
+static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ disable_hwcnt(vinstr_ctx);
+ return enable_hwcnt(vinstr_ctx);
+}
+
+static void hwcnt_bitmap_set(u32 dst[4], u32 src[4])
+{
+ dst[JM_HWCNT_BM] = src[JM_HWCNT_BM];
+ dst[TILER_HWCNT_BM] = src[TILER_HWCNT_BM];
+ dst[SHADER_HWCNT_BM] = src[SHADER_HWCNT_BM];
+ dst[MMU_L2_HWCNT_BM] = src[MMU_L2_HWCNT_BM];
+}
+
+static void hwcnt_bitmap_union(u32 dst[4], u32 src[4])
+{
+ dst[JM_HWCNT_BM] |= src[JM_HWCNT_BM];
+ dst[TILER_HWCNT_BM] |= src[TILER_HWCNT_BM];
+ dst[SHADER_HWCNT_BM] |= src[SHADER_HWCNT_BM];
+ dst[MMU_L2_HWCNT_BM] |= src[MMU_L2_HWCNT_BM];
+}
+
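+/* Size of one complete hardware counter dump: each counter block holds
+ * NR_CNT_PER_BLOCK counters of NR_BYTES_PER_CNT bytes, and the block count
+ * depends on the number of core groups (v4 layout) or on the JM, tiler,
+ * L2-slice and shader-core blocks (v5 layout). */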
+size_t kbase_vinstr_dump_size(struct kbase_device *kbdev)
+{
+ size_t dump_size;
+
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) {
+ u32 nr_cg;
+
+ nr_cg = kbdev->gpu_props.num_core_groups;
+ dump_size = nr_cg * NR_CNT_BLOCKS_PER_GROUP *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ } else
+#endif /* CONFIG_MALI_NO_MALI */
+ {
+ /* assume v5 for now */
+ base_gpu_props *props = &kbdev->gpu_props.props;
+ u32 nr_l2 = props->l2_props.num_l2_slices;
+ u64 core_mask = props->coherency_info.group[0].core_mask;
+ u32 nr_blocks = fls64(core_mask);
+
+ /* JM and tiler counter blocks are always present */
+ dump_size = (2 + nr_l2 + nr_blocks) *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ }
+ return dump_size;
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_dump_size);
+
+static size_t kbasep_vinstr_dump_size_ctx(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ return kbase_vinstr_dump_size(vinstr_ctx->kctx->kbdev);
+}
+
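+/* Allocate a GPU buffer large enough for one counter dump (GPU-writable,
+ * CPU-readable) and map it so the kernel can read back what the GPU wrote. */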
+static int kbasep_vinstr_map_kernel_dump_buffer(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_va_region *reg;
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ u64 flags, nr_pages;
+
+ flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
+ vinstr_ctx->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
+ nr_pages = PFN_UP(vinstr_ctx->dump_size);
+
+ reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
+ &vinstr_ctx->gpu_va);
+ if (!reg)
+ return -ENOMEM;
+
+ vinstr_ctx->cpu_va = kbase_vmap(
+ kctx,
+ vinstr_ctx->gpu_va,
+ vinstr_ctx->dump_size,
+ &vinstr_ctx->vmap);
+ if (!vinstr_ctx->cpu_va) {
+ kbase_mem_free(kctx, vinstr_ctx->gpu_va);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void kbasep_vinstr_unmap_kernel_dump_buffer(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+
+ kbase_vunmap(kctx, &vinstr_ctx->vmap);
+ kbase_mem_free(kctx, vinstr_ctx->gpu_va);
+}
+
+/**
+ * kbasep_vinstr_create_kctx - create kernel context for vinstr
+ * @vinstr_ctx: vinstr context
+ * Return: zero on success
+ */
+static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kbdev;
+ struct kbasep_kctx_list_element *element;
+ unsigned long flags;
+ bool enable_backend = false;
+ int err;
+
+ vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true);
+ if (!vinstr_ctx->kctx)
+ return -ENOMEM;
+
+ /* Map the master kernel dump buffer. The HW dumps the counters
+ * into this memory region. */
+ err = kbasep_vinstr_map_kernel_dump_buffer(vinstr_ctx);
+ if (err) {
+ kbase_destroy_context(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return err;
+ }
+
+ /* Add kernel context to list of contexts associated with device. */
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (element) {
+ element->kctx = vinstr_ctx->kctx;
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_add(&element->link, &kbdev->kctx_list);
+
+ /* Inform timeline client about new context.
+ * Do this while holding the lock to avoid the tracepoint
+ * being created in both the body and summary streams. */
+ KBASE_TLSTREAM_TL_NEW_CTX(
+ vinstr_ctx->kctx,
+ (u32)(vinstr_ctx->kctx->id),
+ (u32)(vinstr_ctx->kctx->tgid));
+
+ mutex_unlock(&kbdev->kctx_list_lock);
+ } else {
+ /* Don't treat this as a fail - just warn about it. */
+ dev_warn(kbdev->dev,
+ "couldn't add kctx to kctx_list\n");
+ }
+
+ /* Don't enable hardware counters if vinstr is suspended.
+ * Note that the vinstr resume code runs under the vinstr context lock;
+ * the lower layer will be enabled as needed on resume. */
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE == vinstr_ctx->state)
+ enable_backend = true;
+ vinstr_ctx->clients_present = true;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ if (enable_backend)
+ err = enable_hwcnt(vinstr_ctx);
+
+ if (err) {
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_del(&element->link);
+ kfree(element);
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ KBASE_TLSTREAM_TL_DEL_CTX(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return err;
+ }
+
+ vinstr_ctx->thread = kthread_run(
+ kbasep_vinstr_service_task,
+ vinstr_ctx,
+ "mali_vinstr_service");
+ if (IS_ERR(vinstr_ctx->thread)) {
+ disable_hwcnt(vinstr_ctx);
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_del(&element->link);
+ kfree(element);
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ KBASE_TLSTREAM_TL_DEL_CTX(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_destroy_kctx - destroy vinstr's kernel context
+ * @vinstr_ctx: vinstr context
+ */
+static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kbdev;
+ struct kbasep_kctx_list_element *element;
+ struct kbasep_kctx_list_element *tmp;
+ bool found = false;
+ unsigned long flags;
+
+ /* Release hw counters dumping resources. */
+ vinstr_ctx->thread = NULL;
+ disable_hwcnt(vinstr_ctx);
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+
+ /* Simplify state transitions by specifying that we have no clients. */
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->clients_present = false;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ /* Remove kernel context from the device's contexts list. */
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
+ if (element->kctx == vinstr_ctx->kctx) {
+ list_del(&element->link);
+ kfree(element);
+ found = true;
+ }
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ if (!found)
+ dev_warn(kbdev->dev, "kctx not in kctx_list\n");
+
+ /* Inform timeline client about context destruction. */
+ KBASE_TLSTREAM_TL_DEL_CTX(vinstr_ctx->kctx);
+
+ vinstr_ctx->kctx = NULL;
+}
+
+/**
+ * kbasep_vinstr_attach_client - Attach a client to the vinstr core
+ * @vinstr_ctx: vinstr context
+ * @buffer_count: requested number of dump buffers
+ * @bitmap: bitmaps describing which counters should be enabled
+ * @argp: pointer where notification descriptor shall be stored
+ * @kernel_buffer: pointer to kernel side buffer
+ *
+ * Return: vinstr opaque client handle or NULL on failure
+ */
+static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
+ struct kbase_vinstr_context *vinstr_ctx, u32 buffer_count,
+ u32 bitmap[4], void *argp, void *kernel_buffer)
+{
+ struct task_struct *thread = NULL;
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
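+ /* Reject requests for more than MAX_BUFFER_COUNT buffers or for a
+ * buffer count that is not a power of two (zero is allowed, and is
+ * used by legacy and kernel-side clients). */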
+ if (buffer_count > MAX_BUFFER_COUNT
+ || (buffer_count & (buffer_count - 1)))
+ return NULL;
+
+ cli = kzalloc(sizeof(*cli), GFP_KERNEL);
+ if (!cli)
+ return NULL;
+
+ cli->vinstr_ctx = vinstr_ctx;
+ cli->buffer_count = buffer_count;
+ cli->event_mask =
+ (1 << BASE_HWCNT_READER_EVENT_MANUAL) |
+ (1 << BASE_HWCNT_READER_EVENT_PERIODIC);
+ cli->pending = true;
+
+ hwcnt_bitmap_set(cli->bitmap, bitmap);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap);
+ vinstr_ctx->reprogram = true;
+
+ /* If this is the first client, create the vinstr kbase
+ * context. This context remains resident until the
+ * last client exits. */
+ if (!vinstr_ctx->nclients) {
+ hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap);
+ if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0)
+ goto error;
+
+ vinstr_ctx->reprogram = false;
+ cli->pending = false;
+ }
+
+ /* The GPU resets the counter block every time there is a request
+ * to dump it. We need a per-client kernel buffer for accumulating
+ * the counters. */
+ cli->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
+ cli->accum_buffer = kzalloc(cli->dump_size, GFP_KERNEL);
+ if (!cli->accum_buffer)
+ goto error;
+
+ /* Prepare buffers. */
+ if (cli->buffer_count) {
+ int *fd = (int *)argp;
+ size_t tmp;
+
+ /* Allocate area for buffers metadata storage. */
+ tmp = sizeof(struct kbase_hwcnt_reader_metadata) *
+ cli->buffer_count;
+ cli->dump_buffers_meta = kmalloc(tmp, GFP_KERNEL);
+ if (!cli->dump_buffers_meta)
+ goto error;
+
+ /* Allocate required number of dumping buffers. */
+ cli->dump_buffers = (char *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(cli->dump_size * cli->buffer_count));
+ if (!cli->dump_buffers)
+ goto error;
+
+ /* Create descriptor for user-kernel data exchange. */
+ *fd = anon_inode_getfd(
+ "[mali_vinstr_desc]",
+ &vinstr_client_fops,
+ cli,
+ O_RDONLY | O_CLOEXEC);
+ if (0 > *fd)
+ goto error;
+ } else if (kernel_buffer) {
+ cli->kernel_buffer = kernel_buffer;
+ } else {
+ cli->legacy_buffer = (void __user *)argp;
+ }
+
+ atomic_set(&cli->read_idx, 0);
+ atomic_set(&cli->meta_idx, 0);
+ atomic_set(&cli->write_idx, 0);
+ init_waitqueue_head(&cli->waitq);
+
+ vinstr_ctx->nclients++;
+ list_add(&cli->list, &vinstr_ctx->idle_clients);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return cli;
+
+error:
+ kfree(cli->dump_buffers_meta);
+ if (cli->dump_buffers)
+ free_pages(
+ (unsigned long)cli->dump_buffers,
+ get_order(cli->dump_size * cli->buffer_count));
+ kfree(cli->accum_buffer);
+ if (!vinstr_ctx->nclients && vinstr_ctx->kctx) {
+ thread = vinstr_ctx->thread;
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ }
+ kfree(cli);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Thread must be stopped after lock is released. */
+ if (thread)
+ kthread_stop(thread);
+
+ return NULL;
+}
+
+void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ struct kbase_vinstr_client *iter, *tmp;
+ struct task_struct *thread = NULL;
+ u32 zerobitmap[4] = { 0 };
+ int cli_found = 0;
+
+ KBASE_DEBUG_ASSERT(cli);
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) {
+ if (iter == cli) {
+ vinstr_ctx->reprogram = true;
+ cli_found = 1;
+ list_del(&iter->list);
+ break;
+ }
+ }
+ if (!cli_found) {
+ list_for_each_entry_safe(
+ iter, tmp, &vinstr_ctx->waiting_clients, list) {
+ if (iter == cli) {
+ vinstr_ctx->reprogram = true;
+ cli_found = 1;
+ list_del(&iter->list);
+ break;
+ }
+ }
+ }
+ KBASE_DEBUG_ASSERT(cli_found);
+
+ kfree(cli->dump_buffers_meta);
+ free_pages(
+ (unsigned long)cli->dump_buffers,
+ get_order(cli->dump_size * cli->buffer_count));
+ kfree(cli->accum_buffer);
+ kfree(cli);
+
+ vinstr_ctx->nclients--;
+ if (!vinstr_ctx->nclients) {
+ thread = vinstr_ctx->thread;
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ }
+
+ /* Rebuild context bitmap now that the client has detached */
+ hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap);
+ list_for_each_entry(iter, &vinstr_ctx->idle_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+ list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Thread must be stopped after lock is released. */
+ if (thread)
+ kthread_stop(thread);
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_detach_client);
+
+/* Accumulate counters in the dump buffer */
+static void accum_dump_buffer(void *dst, void *src, size_t dump_size)
+{
+ size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+ u32 *d = dst;
+ u32 *s = src;
+ size_t i, j;
+
+ for (i = 0; i < dump_size; i += block_size) {
+ /* skip over the header block */
+ d += NR_BYTES_PER_HDR / sizeof(u32);
+ s += NR_BYTES_PER_HDR / sizeof(u32);
+ for (j = 0; j < (block_size - NR_BYTES_PER_HDR) / sizeof(u32); j++) {
+ /* saturate result if addition would result in wraparound */
+ if (U32_MAX - *d < *s)
+ *d = U32_MAX;
+ else
+ *d += *s;
+ d++;
+ s++;
+ }
+ }
+}
+
+/* This is the Midgard v4 patch function. It copies the headers for each
+ * of the defined blocks from the master kernel buffer and then patches up
+ * the performance counter enable mask for each of the blocks to exclude
+ * counters that were not requested by the client. */
+static void patch_dump_buffer_hdr_v4(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client *cli)
+{
+ u32 *mask;
+ u8 *dst = cli->accum_buffer;
+ u8 *src = vinstr_ctx->cpu_va;
+ u32 nr_cg = vinstr_ctx->kctx->kbdev->gpu_props.num_core_groups;
+ size_t i, group_size, group;
+ enum {
+ SC0_BASE = 0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC1_BASE = 1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC2_BASE = 2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC3_BASE = 3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ TILER_BASE = 4 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ MMU_L2_BASE = 5 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ JM_BASE = 7 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT
+ };
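+ /* Note that block index 6 is skipped in this per-group layout: the
+ * job manager block sits at index 7. */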
+
+ group_size = NR_CNT_BLOCKS_PER_GROUP *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ for (i = 0; i < nr_cg; i++) {
+ group = i * group_size;
+ /* copy shader core headers */
+ memcpy(&dst[group + SC0_BASE], &src[group + SC0_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC1_BASE], &src[group + SC1_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC2_BASE], &src[group + SC2_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC3_BASE], &src[group + SC3_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy tiler header */
+ memcpy(&dst[group + TILER_BASE], &src[group + TILER_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy mmu header */
+ memcpy(&dst[group + MMU_L2_BASE], &src[group + MMU_L2_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy job manager header */
+ memcpy(&dst[group + JM_BASE], &src[group + JM_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* patch the shader core enable mask */
+ mask = (u32 *)&dst[group + SC0_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC1_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC2_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC3_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+
+ /* patch the tiler core enable mask */
+ mask = (u32 *)&dst[group + TILER_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[TILER_HWCNT_BM];
+
+ /* patch the mmu core enable mask */
+ mask = (u32 *)&dst[group + MMU_L2_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[MMU_L2_HWCNT_BM];
+
+ /* patch the job manager enable mask */
+ mask = (u32 *)&dst[group + JM_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[JM_HWCNT_BM];
+ }
+}
+
+/* This is the Midgard v5 patch function. It copies the headers for each
+ * of the defined blocks from the master kernel buffer and then patches up
+ * the performance counter enable mask for each of the blocks to exclude
+ * counters that were not requested by the client. */
+static void patch_dump_buffer_hdr_v5(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client *cli)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev;
+ u32 i, nr_l2;
+ u64 core_mask;
+ u32 *mask;
+ u8 *dst = cli->accum_buffer;
+ u8 *src = vinstr_ctx->cpu_va;
+ size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+
+ /* copy and patch job manager header */
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[JM_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+
+ /* copy and patch tiler header */
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[TILER_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+
+ /* copy and patch MMU/L2C headers */
+ nr_l2 = kbdev->gpu_props.props.l2_props.num_l2_slices;
+ for (i = 0; i < nr_l2; i++) {
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[MMU_L2_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+ }
+
+ /* copy and patch shader core headers */
+ core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
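+ /* Every bit position in core_mask corresponds to one block in the
+ * dump: headers are copied for all positions, but the enable mask is
+ * only patched for cores that are actually present. */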
+ while (0ull != core_mask) {
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ if (0ull != (core_mask & 1ull)) {
+ /* if the block is not reserved, update its header */
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ }
+ dst += block_size;
+ src += block_size;
+
+ core_mask >>= 1;
+ }
+}
+
+/**
+ * accum_clients - accumulate dumped hw counters for all known clients
+ * @vinstr_ctx: vinstr context
+ */
+static void accum_clients(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_vinstr_client *iter;
+ int v4 = 0;
+
+#ifndef CONFIG_MALI_NO_MALI
+ v4 = kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4);
+#endif
+
+ list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) {
+ /* Don't bother accumulating clients whose hwcnt requests
+ * have not yet been honoured. */
+ if (iter->pending)
+ continue;
+ if (v4)
+ patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
+ else
+ patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
+ accum_dump_buffer(
+ iter->accum_buffer,
+ vinstr_ctx->cpu_va,
+ iter->dump_size);
+ }
+ list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) {
+ /* Don't bother accumulating clients whose hwcnt requests
+ * have not yet been honoured. */
+ if (iter->pending)
+ continue;
+ if (v4)
+ patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
+ else
+ patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
+ accum_dump_buffer(
+ iter->accum_buffer,
+ vinstr_ctx->cpu_va,
+ iter->dump_size);
+ }
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_get_timestamp - return timestamp
+ *
+ * The function returns a timestamp value based on the raw monotonic timer.
+ * The value will wrap around zero in case of overflow.
+ *
+ * Return: timestamp value
+ */
+static u64 kbasep_vinstr_get_timestamp(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+ return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+}
+
+/**
+ * kbasep_vinstr_add_dump_request - register client's dumping request
+ * @cli: requesting client
+ * @waiting_clients: list of pending dumping requests
+ */
+static void kbasep_vinstr_add_dump_request(
+ struct kbase_vinstr_client *cli,
+ struct list_head *waiting_clients)
+{
+ struct kbase_vinstr_client *tmp;
+
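+ /* Keep the waiting list sorted by ascending dump_time so the service
+ * thread always handles the earliest request first. */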
+ if (list_empty(waiting_clients)) {
+ list_add(&cli->list, waiting_clients);
+ return;
+ }
+ list_for_each_entry(tmp, waiting_clients, list) {
+ if (tmp->dump_time > cli->dump_time) {
+ list_add_tail(&cli->list, &tmp->list);
+ return;
+ }
+ }
+ list_add_tail(&cli->list, waiting_clients);
+}
+
+/**
+ * kbasep_vinstr_collect_and_accumulate - collect hw counters via low level
+ * dump and accumulate them for known
+ * clients
+ * @vinstr_ctx: vinstr context
+ * @timestamp: pointer where collection timestamp will be recorded
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_collect_and_accumulate(
+ struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp)
+{
+ unsigned long flags;
+ int rcode;
+
+#ifdef CONFIG_MALI_NO_MALI
+ /* The dummy model needs the CPU mapping. */
+ gpu_model_set_dummy_prfcnt_base_cpu(vinstr_ctx->cpu_va);
+#endif
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE != vinstr_ctx->state) {
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ return -EAGAIN;
+ } else {
+ vinstr_ctx->state = VINSTR_DUMPING;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ /* Request HW counters dump.
+ * Disable preemption to make dump timestamp more accurate. */
+ preempt_disable();
+ *timestamp = kbasep_vinstr_get_timestamp();
+ rcode = kbase_instr_hwcnt_request_dump(vinstr_ctx->kctx);
+ preempt_enable();
+
+ if (!rcode)
+ rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx);
+ WARN_ON(rcode);
+
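+ /* A suspend request may have arrived while the dump was in progress;
+ * hand over to the suspend worker, otherwise return to the idle state. */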
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ switch (vinstr_ctx->state)
+ {
+ case VINSTR_SUSPENDING:
+ schedule_work(&vinstr_ctx->suspend_work);
+ break;
+ case VINSTR_DUMPING:
+ vinstr_ctx->state = VINSTR_IDLE;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ /* Accumulate values of collected counters. */
+ if (!rcode)
+ accum_clients(vinstr_ctx);
+
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_fill_dump_buffer - copy accumulated counters to empty kernel
+ * buffer
+ * @cli: requesting client
+ * @timestamp: timestamp when counters were collected
+ * @event_id: id of the event that triggered the counters collection
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_fill_dump_buffer(
+ struct kbase_vinstr_client *cli, u64 timestamp,
+ enum base_hwcnt_reader_event event_id)
+{
+ unsigned int write_idx = atomic_read(&cli->write_idx);
+ unsigned int read_idx = atomic_read(&cli->read_idx);
+
+ struct kbase_hwcnt_reader_metadata *meta;
+ void *buffer;
+
+ /* Check if there is a place to copy HWC block into. */
+ if (write_idx - read_idx == cli->buffer_count)
+ return -1;
+ write_idx %= cli->buffer_count;
+
+ /* Fill in dump buffer and its metadata. */
+ buffer = &cli->dump_buffers[write_idx * cli->dump_size];
+ meta = &cli->dump_buffers_meta[write_idx];
+ meta->timestamp = timestamp;
+ meta->event_id = event_id;
+ meta->buffer_idx = write_idx;
+ memcpy(buffer, cli->accum_buffer, cli->dump_size);
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_fill_dump_buffer_legacy - copy accumulated counters to buffer
+ * allocated in userspace
+ * @cli: requesting client
+ *
+ * Return: zero on success
+ *
+ * This is part of legacy ioctl interface.
+ */
+static int kbasep_vinstr_fill_dump_buffer_legacy(
+ struct kbase_vinstr_client *cli)
+{
+ void __user *buffer = cli->legacy_buffer;
+ int rcode;
+
+ /* Copy data to user buffer. */
+ rcode = copy_to_user(buffer, cli->accum_buffer, cli->dump_size);
+ if (rcode) {
+ pr_warn("error while copying buffer to user\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_fill_dump_buffer_kernel - copy accumulated counters to buffer
+ * allocated in kernel space
+ * @cli: requesting client
+ *
+ * Return: zero on success
+ *
+ * This is part of the kernel client interface.
+ */
+static int kbasep_vinstr_fill_dump_buffer_kernel(
+ struct kbase_vinstr_client *cli)
+{
+ memcpy(cli->kernel_buffer, cli->accum_buffer, cli->dump_size);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_reprogram - reprogram the hwcnt set collected by instrumentation
+ * @vinstr_ctx: vinstr context
+ */
+static void kbasep_vinstr_reprogram(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+ bool suspended = false;
+
+ /* Don't enable hardware counters if vinstr is suspended. */
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE != vinstr_ctx->state)
+ suspended = true;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ if (suspended)
+ return;
+
+ /* The change to the suspended state is done while holding the vinstr
+ * context lock. The code below will then not re-enable the instrumentation. */
+
+ if (vinstr_ctx->reprogram) {
+ struct kbase_vinstr_client *iter;
+
+ if (!reprogram_hwcnt(vinstr_ctx)) {
+ vinstr_ctx->reprogram = false;
+ list_for_each_entry(
+ iter,
+ &vinstr_ctx->idle_clients,
+ list)
+ iter->pending = false;
+ list_for_each_entry(
+ iter,
+ &vinstr_ctx->waiting_clients,
+ list)
+ iter->pending = false;
+ }
+ }
+}
+
+/**
+ * kbasep_vinstr_update_client - copy accumulated counters to user readable
+ * buffer and notify the user
+ * @cli: requesting client
+ * @timestamp: timestamp when counters were collected
+ * @event_id: id of the event that triggered the counters collection
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_update_client(
+ struct kbase_vinstr_client *cli, u64 timestamp,
+ enum base_hwcnt_reader_event event_id)
+{
+ int rcode = 0;
+
+ /* Copy collected counters to user readable buffer. */
+ if (cli->buffer_count)
+ rcode = kbasep_vinstr_fill_dump_buffer(
+ cli, timestamp, event_id);
+ else if (cli->kernel_buffer)
+ rcode = kbasep_vinstr_fill_dump_buffer_kernel(cli);
+ else
+ rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli);
+
+ if (rcode)
+ goto exit;
+
+ /* Notify client. Make sure all changes to memory are visible. */
+ wmb();
+ atomic_inc(&cli->write_idx);
+ wake_up_interruptible(&cli->waitq);
+
+ /* Prepare for next request. */
+ memset(cli->accum_buffer, 0, cli->dump_size);
+
+exit:
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_wake_up_callback - vinstr wake up timer wake up function
+ *
+ * @hrtimer: high resolution timer
+ *
+ * Return: High resolution timer restart enum.
+ */
+static enum hrtimer_restart kbasep_vinstr_wake_up_callback(
+ struct hrtimer *hrtimer)
+{
+ struct kbasep_vinstr_wake_up_timer *timer =
+ container_of(
+ hrtimer,
+ struct kbasep_vinstr_wake_up_timer,
+ hrtimer);
+
+ KBASE_DEBUG_ASSERT(timer);
+
+ atomic_set(&timer->vinstr_ctx->request_pending, 1);
+ wake_up_all(&timer->vinstr_ctx->waitq);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * kbasep_vinstr_service_task - HWC dumping service thread
+ *
+ * @data: Pointer to vinstr context structure.
+ *
+ * Return: 0 on success; -ENOMEM if timer allocation fails
+ */
+static int kbasep_vinstr_service_task(void *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx = data;
+ struct kbasep_vinstr_wake_up_timer *timer;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ timer = kmalloc(sizeof(*timer), GFP_KERNEL);
+
+ if (!timer) {
+ dev_warn(vinstr_ctx->kbdev->dev, "Timer allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ hrtimer_init(&timer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ timer->hrtimer.function = kbasep_vinstr_wake_up_callback;
+ timer->vinstr_ctx = vinstr_ctx;
+
+ while (!kthread_should_stop()) {
+ struct kbase_vinstr_client *cli = NULL;
+ struct kbase_vinstr_client *tmp;
+ int rcode;
+
+ u64 timestamp = kbasep_vinstr_get_timestamp();
+ u64 dump_time = 0;
+ struct list_head expired_requests;
+
+ /* Hold lock while performing operations on lists of clients. */
+ mutex_lock(&vinstr_ctx->lock);
+
+ /* Closing thread must not interact with client requests. */
+ if (current == vinstr_ctx->thread) {
+ atomic_set(&vinstr_ctx->request_pending, 0);
+
+ if (!list_empty(&vinstr_ctx->waiting_clients)) {
+ cli = list_first_entry(
+ &vinstr_ctx->waiting_clients,
+ struct kbase_vinstr_client,
+ list);
+ dump_time = cli->dump_time;
+ }
+ }
+
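+ /* No waiting client, or the earliest request is still in the
+ * future: sleep until it is due or a new request arrives. */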
+ if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) {
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Sleep until next dumping event or service request. */
+ if (cli) {
+ u64 diff = dump_time - timestamp;
+
+ hrtimer_start(
+ &timer->hrtimer,
+ ns_to_ktime(diff),
+ HRTIMER_MODE_REL);
+ }
+ wait_event(
+ vinstr_ctx->waitq,
+ atomic_read(
+ &vinstr_ctx->request_pending) ||
+ kthread_should_stop());
+ hrtimer_cancel(&timer->hrtimer);
+ continue;
+ }
+
+ rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx,
+ &timestamp);
+
+ INIT_LIST_HEAD(&expired_requests);
+
+ /* Find all expired requests. */
+ list_for_each_entry_safe(
+ cli,
+ tmp,
+ &vinstr_ctx->waiting_clients,
+ list) {
+ s64 tdiff =
+ (s64)(timestamp + DUMPING_RESOLUTION) -
+ (s64)cli->dump_time;
+ if (tdiff >= 0ll) {
+ list_del(&cli->list);
+ list_add(&cli->list, &expired_requests);
+ } else {
+ break;
+ }
+ }
+
+ /* Fill data for each request found. */
+ list_for_each_entry_safe(cli, tmp, &expired_requests, list) {
+ /* Ensure that legacy buffer will not be used from
+ * this kthread context. */
+ BUG_ON(0 == cli->buffer_count);
+ /* Expect only periodically sampled clients. */
+ BUG_ON(0 == cli->dump_interval);
+
+ if (!rcode)
+ kbasep_vinstr_update_client(
+ cli,
+ timestamp,
+ BASE_HWCNT_READER_EVENT_PERIODIC);
+
+ /* Set new dumping time. Drop missed probing times. */
+ do {
+ cli->dump_time += cli->dump_interval;
+ } while (cli->dump_time < timestamp);
+
+ list_del(&cli->list);
+ kbasep_vinstr_add_dump_request(
+ cli,
+ &vinstr_ctx->waiting_clients);
+ }
+
+ /* Reprogram counters set if required. */
+ kbasep_vinstr_reprogram(vinstr_ctx);
+
+ mutex_unlock(&vinstr_ctx->lock);
+ }
+
+ kfree(timer);
+
+ return 0;
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_hwcnt_reader_buffer_ready - check if client has ready buffers
+ * @cli: pointer to vinstr client structure
+ *
+ * Return: non-zero if the client has at least one filled dump buffer that
+ * has not yet been notified to the user
+ */
+static int kbasep_vinstr_hwcnt_reader_buffer_ready(
+ struct kbase_vinstr_client *cli)
+{
+ KBASE_DEBUG_ASSERT(cli);
+ return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @buffer: pointer to userspace buffer
+ * @size: size of buffer
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+ struct kbase_vinstr_client *cli, void __user *buffer,
+ size_t size)
+{
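+ /* Buffers cycle through three indices: write_idx (filled by the
+ * dumper), meta_idx (handed to userspace via GET_BUFFER) and read_idx
+ * (returned by userspace via PUT_BUFFER). */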
+ unsigned int meta_idx = atomic_read(&cli->meta_idx);
+ unsigned int idx = meta_idx % cli->buffer_count;
+
+ struct kbase_hwcnt_reader_metadata *meta = &cli->dump_buffers_meta[idx];
+
+ /* Metadata sanity check. */
+ KBASE_DEBUG_ASSERT(idx == meta->buffer_idx);
+
+ if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+ return -EINVAL;
+
+ /* Check if there is any buffer available. */
+ if (atomic_read(&cli->write_idx) == meta_idx)
+ return -EAGAIN;
+
+ /* Check if previously taken buffer was put back. */
+ if (atomic_read(&cli->read_idx) != meta_idx)
+ return -EBUSY;
+
+ /* Copy next available buffer's metadata to user. */
+ if (copy_to_user(buffer, meta, size))
+ return -EFAULT;
+
+ atomic_inc(&cli->meta_idx);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @buffer: pointer to userspace buffer
+ * @size: size of buffer
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+ struct kbase_vinstr_client *cli, void __user *buffer,
+ size_t size)
+{
+ unsigned int read_idx = atomic_read(&cli->read_idx);
+ unsigned int idx = read_idx % cli->buffer_count;
+
+ struct kbase_hwcnt_reader_metadata meta;
+
+ if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+ return -EINVAL;
+
+ /* Check if any buffer was taken. */
+ if (atomic_read(&cli->meta_idx) == read_idx)
+ return -EPERM;
+
+ /* Check if correct buffer is put back. */
+ if (copy_from_user(&meta, buffer, size))
+ return -EFAULT;
+ if (idx != meta.buffer_idx)
+ return -EINVAL;
+
+ atomic_inc(&cli->read_idx);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_set_interval - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @interval: periodic dumping interval (disable periodic dumping if zero)
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+ struct kbase_vinstr_client *cli, u32 interval)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ list_del(&cli->list);
+
+ cli->dump_interval = interval;
+
+ /* If interval is non-zero, enable periodic dumping for this client. */
+ if (cli->dump_interval) {
+ if (DUMPING_RESOLUTION > cli->dump_interval)
+ cli->dump_interval = DUMPING_RESOLUTION;
+ cli->dump_time =
+ kbasep_vinstr_get_timestamp() + cli->dump_interval;
+
+ kbasep_vinstr_add_dump_request(
+ cli, &vinstr_ctx->waiting_clients);
+
+ atomic_set(&vinstr_ctx->request_pending, 1);
+ wake_up_all(&vinstr_ctx->waitq);
+ } else {
+ list_add(&cli->list, &vinstr_ctx->idle_clients);
+ }
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_event_mask - return event mask for event id
+ * @event_id: id of event
+ * Return: event_mask, or zero if the event is non-maskable or not supported
+ */
+static u32 kbasep_vinstr_hwcnt_reader_event_mask(
+ enum base_hwcnt_reader_event event_id)
+{
+ u32 event_mask = 0;
+
+ switch (event_id) {
+ case BASE_HWCNT_READER_EVENT_PREJOB:
+ case BASE_HWCNT_READER_EVENT_POSTJOB:
+ /* These events are maskable. */
+ event_mask = (1 << event_id);
+ break;
+
+ case BASE_HWCNT_READER_EVENT_MANUAL:
+ case BASE_HWCNT_READER_EVENT_PERIODIC:
+ /* These events are non-maskable. */
+ default:
+ /* These events are not supported. */
+ break;
+ }
+
+ return event_mask;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_enable_event - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @event_id: id of event to enable
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+ u32 event_mask;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
+ if (!event_mask)
+ return -EINVAL;
+
+ mutex_lock(&vinstr_ctx->lock);
+ cli->event_mask |= event_mask;
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_disable_event - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @event_id: id of event to disable
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+ u32 event_mask;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
+ if (!event_mask)
+ return -EINVAL;
+
+ mutex_lock(&vinstr_ctx->lock);
+ cli->event_mask &= ~event_mask;
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @hwver: pointer to user buffer where hw version will be stored
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+ struct kbase_vinstr_client *cli, u32 __user *hwver)
+{
+#ifndef CONFIG_MALI_NO_MALI
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+#endif
+
+ u32 ver = 5;
+
+#ifndef CONFIG_MALI_NO_MALI
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ if (kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4))
+ ver = 4;
+#endif
+
+ return put_user(ver, hwver);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl - hwcnt reader's ioctl
+ * @filp: pointer to file structure
+ * @cmd: user command
+ * @arg: command's argument
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ long rcode = 0;
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(filp);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ if (unlikely(KBASE_HWCNT_READER != _IOC_TYPE(cmd)))
+ return -EINVAL;
+
+ switch (cmd) {
+ case KBASE_HWCNT_READER_GET_API_VERSION:
+ rcode = put_user(HWCNT_READER_API, (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_GET_HWVER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+ cli, (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_GET_BUFFER_SIZE:
+ KBASE_DEBUG_ASSERT(cli->vinstr_ctx);
+ rcode = put_user(
+ (u32)cli->vinstr_ctx->dump_size,
+ (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_DUMP:
+ rcode = kbase_vinstr_hwc_dump(
+ cli, BASE_HWCNT_READER_EVENT_MANUAL);
+ break;
+ case KBASE_HWCNT_READER_CLEAR:
+ rcode = kbase_vinstr_hwc_clear(cli);
+ break;
+ case KBASE_HWCNT_READER_GET_BUFFER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+ cli, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ case KBASE_HWCNT_READER_PUT_BUFFER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+ cli, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ case KBASE_HWCNT_READER_SET_INTERVAL:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+ cli, (u32)arg);
+ break;
+ case KBASE_HWCNT_READER_ENABLE_EVENT:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+ cli, (enum base_hwcnt_reader_event)arg);
+ break;
+ case KBASE_HWCNT_READER_DISABLE_EVENT:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+ cli, (enum base_hwcnt_reader_event)arg);
+ break;
+ default:
+ rcode = -EINVAL;
+ break;
+ }
+
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_poll - hwcnt reader's poll
+ * @filp: pointer to file structure
+ * @wait: pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(struct file *filp,
+ poll_table *wait)
+{
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ poll_wait(filp, &cli->waitq, wait);
+ if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_mmap - hwcnt reader's mmap
+ * @filp: pointer to file structure
+ * @vma: pointer to vma structure
+ * Return: zero on success
+ */
+static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct kbase_vinstr_client *cli;
+ unsigned long size, addr, pfn, offset;
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(vma);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ size = cli->buffer_count * cli->dump_size;
+
+ if (vma->vm_pgoff > (size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ if (vm_size > size - offset)
+ return -EINVAL;
+
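+ /* dump_buffers were allocated with __get_free_pages(), so they are
+ * physically contiguous and can be remapped directly. */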
+ addr = __pa((unsigned long)cli->dump_buffers + offset);
+ pfn = addr >> PAGE_SHIFT;
+
+ return remap_pfn_range(
+ vma,
+ vma->vm_start,
+ pfn,
+ vm_size,
+ vma->vm_page_prot);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_release - hwcnt reader's release
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ * Return: always returns zero
+ */
+static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
+ struct file *filp)
+{
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ kbase_vinstr_detach_client(cli);
+ return 0;
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_kick_scheduler - trigger scheduler cycle
+ * @kbdev: pointer to kbase device structure
+ */
+static void kbasep_vinstr_kick_scheduler(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+
+ down(&js_devdata->schedule_sem);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_backend_slot_update(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ up(&js_devdata->schedule_sem);
+}
+
+/**
+ * kbasep_vinstr_suspend_worker - worker suspending vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_suspend_worker(struct work_struct *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ unsigned long flags;
+
+ vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+ suspend_work);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (vinstr_ctx->kctx)
+ disable_hwcnt(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->state = VINSTR_SUSPENDED;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Kick GPU scheduler to allow entering protected mode.
+ * This must happen after vinstr was suspended. */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/**
+ * kbasep_vinstr_resume_worker - worker resuming vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_resume_worker(struct work_struct *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ unsigned long flags;
+
+ vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+ resume_work);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (vinstr_ctx->kctx)
+ enable_hwcnt(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->state = VINSTR_IDLE;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Kick GPU scheduler to allow entering protected mode.
+ * Note that the scheduler state machine might have requested re-entry to
+ * protected mode before vinstr was resumed.
+ * This must happen after vinstr has been released. */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/*****************************************************************************/
+
+struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+
+ vinstr_ctx = kzalloc(sizeof(*vinstr_ctx), GFP_KERNEL);
+ if (!vinstr_ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
+ INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
+ mutex_init(&vinstr_ctx->lock);
+ spin_lock_init(&vinstr_ctx->state_lock);
+ vinstr_ctx->kbdev = kbdev;
+ vinstr_ctx->thread = NULL;
+ vinstr_ctx->state = VINSTR_IDLE;
+ vinstr_ctx->suspend_cnt = 0;
+ INIT_WORK(&vinstr_ctx->suspend_work, kbasep_vinstr_suspend_worker);
+ INIT_WORK(&vinstr_ctx->resume_work, kbasep_vinstr_resume_worker);
+ init_waitqueue_head(&vinstr_ctx->suspend_waitq);
+
+ atomic_set(&vinstr_ctx->request_pending, 0);
+ init_waitqueue_head(&vinstr_ctx->waitq);
+
+ return vinstr_ctx;
+}
+
+void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_vinstr_client *cli;
+
+ /* Stop service thread first. */
+ if (vinstr_ctx->thread)
+ kthread_stop(vinstr_ctx->thread);
+
+ /* Wait for workers. */
+ flush_work(&vinstr_ctx->suspend_work);
+ flush_work(&vinstr_ctx->resume_work);
+
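+ /* Free any clients that are still attached: drain the idle list
+ * first, then the waiting list. */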
+ while (1) {
+ struct list_head *list = &vinstr_ctx->idle_clients;
+
+ if (list_empty(list)) {
+ list = &vinstr_ctx->waiting_clients;
+ if (list_empty(list))
+ break;
+ }
+
+ cli = list_first_entry(list, struct kbase_vinstr_client, list);
+ list_del(&cli->list);
+ kfree(cli->accum_buffer);
+ kfree(cli);
+ vinstr_ctx->nclients--;
+ }
+ KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients);
+ if (vinstr_ctx->kctx)
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ kfree(vinstr_ctx);
+}
+
+int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup)
+{
+ struct kbase_vinstr_client *cli;
+ u32 bitmap[4];
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ KBASE_DEBUG_ASSERT(setup);
+ KBASE_DEBUG_ASSERT(setup->buffer_count);
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ cli = kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ setup->buffer_count,
+ bitmap,
+ &setup->fd,
+ NULL);
+
+ if (!cli)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int kbase_vinstr_legacy_hwc_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client **cli,
+ struct kbase_uk_hwcnt_setup *setup)
+{
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ KBASE_DEBUG_ASSERT(setup);
+ KBASE_DEBUG_ASSERT(cli);
+
+ if (setup->dump_buffer) {
+ u32 bitmap[4];
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ if (*cli)
+ return -EBUSY;
+
+ *cli = kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ 0,
+ bitmap,
+ (void *)(long)setup->dump_buffer,
+ NULL);
+
+ if (!(*cli))
+ return -ENOMEM;
+ } else {
+ if (!*cli)
+ return -EINVAL;
+
+ kbase_vinstr_detach_client(*cli);
+ *cli = NULL;
+ }
+
+ return 0;
+}
+
+struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup,
+ void *kernel_buffer)
+{
+ u32 bitmap[4];
+
+ if (!vinstr_ctx || !setup || !kernel_buffer)
+ return NULL;
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ return kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ 0,
+ bitmap,
+ NULL,
+ kernel_buffer);
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwcnt_kernel_setup);
+
+int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ int rcode = 0;
+ struct kbase_vinstr_context *vinstr_ctx;
+ u64 timestamp;
+ u32 event_mask;
+
+ if (!cli)
+ return -EINVAL;
+
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ KBASE_DEBUG_ASSERT(event_id < BASE_HWCNT_READER_EVENT_COUNT);
+ event_mask = 1 << event_id;
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (event_mask & cli->event_mask) {
+ rcode = kbasep_vinstr_collect_and_accumulate(
+ vinstr_ctx,
+ &timestamp);
+ if (rcode)
+ goto exit;
+
+ rcode = kbasep_vinstr_update_client(cli, timestamp, event_id);
+ if (rcode)
+ goto exit;
+
+ kbasep_vinstr_reprogram(vinstr_ctx);
+ }
+
+exit:
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return rcode;
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwc_dump);
+
+int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ int rcode;
+ u64 unused;
+
+ if (!cli)
+ return -EINVAL;
+
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
+ if (rcode)
+ goto exit;
+ rcode = kbase_instr_hwcnt_clear(vinstr_ctx->kctx);
+ if (rcode)
+ goto exit;
+ memset(cli->accum_buffer, 0, cli->dump_size);
+
+ kbasep_vinstr_reprogram(vinstr_ctx);
+
+exit:
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return rcode;
+}
+
+int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+ int ret = -EAGAIN;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ switch (vinstr_ctx->state) {
+ case VINSTR_SUSPENDED:
+ vinstr_ctx->suspend_cnt++;
+ /* overflow shall not happen */
+ BUG_ON(0 == vinstr_ctx->suspend_cnt);
+ ret = 0;
+ break;
+
+ case VINSTR_IDLE:
+ if (vinstr_ctx->clients_present) {
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ schedule_work(&vinstr_ctx->suspend_work);
+ } else {
+ vinstr_ctx->state = VINSTR_SUSPENDED;
+
+ vinstr_ctx->suspend_cnt++;
+ /* overflow shall not happen */
+ WARN_ON(0 == vinstr_ctx->suspend_cnt);
+ ret = 0;
+ }
+ break;
+
+ case VINSTR_DUMPING:
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ break;
+
+ case VINSTR_SUSPENDING:
+ /* fall through */
+ case VINSTR_RESUMING:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ return ret;
+}
+
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx)
+{
+ wait_event(vinstr_ctx->suspend_waitq,
+ (0 == kbase_vinstr_try_suspend(vinstr_ctx)));
+}
+
+void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ BUG_ON(VINSTR_SUSPENDING == vinstr_ctx->state);
+ if (VINSTR_SUSPENDED == vinstr_ctx->state) {
+ BUG_ON(0 == vinstr_ctx->suspend_cnt);
+ vinstr_ctx->suspend_cnt--;
+ if (0 == vinstr_ctx->suspend_cnt) {
+ if (vinstr_ctx->clients_present) {
+ vinstr_ctx->state = VINSTR_RESUMING;
+ schedule_work(&vinstr_ctx->resume_work);
+ } else {
+ vinstr_ctx->state = VINSTR_IDLE;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+}
diff --git a/drivers/gpu/arm_gpu/mali_kbase_vinstr.h b/drivers/gpu/arm_gpu/mali_kbase_vinstr.h
new file mode 100644
index 000000000000..6207d25aef06
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_kbase_vinstr.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_VINSTR_H_
+#define _KBASE_VINSTR_H_
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwcnt_reader.h>
+
+/*****************************************************************************/
+
+struct kbase_vinstr_context;
+struct kbase_vinstr_client;
+
+/*****************************************************************************/
+
+/**
+ * kbase_vinstr_init() - initialize the vinstr core
+ * @kbdev: kbase device
+ *
+ * Return: pointer to the vinstr context on success or NULL on failure
+ */
+struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_vinstr_term() - terminate the vinstr core
+ * @vinstr_ctx: vinstr context
+ */
+void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_hwcnt_reader_setup - configure hw counters reader
+ * @vinstr_ctx: vinstr context
+ * @setup: reader's configuration
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwcnt_reader_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup);
+
+/**
+ * kbase_vinstr_legacy_hwc_setup - configure hw counters for dumping
+ * @vinstr_ctx: vinstr context
+ * @cli: pointer where to store pointer to new vinstr client structure
+ * @setup: hwc configuration
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_legacy_hwc_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client **cli,
+ struct kbase_uk_hwcnt_setup *setup);
+
+/**
+ * kbase_vinstr_hwcnt_kernel_setup - configure hw counters for kernel side
+ * client
+ * @vinstr_ctx: vinstr context
+ * @setup: reader's configuration
+ * @kernel_buffer: pointer to dump buffer
+ *
+ * setup->buffer_count and setup->fd are not used for kernel-side clients.
+ *
+ * Return: pointer to client structure, or NULL on failure
+ */
+struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup,
+ void *kernel_buffer);
+
+/**
+ * kbase_vinstr_hwc_dump - issue counter dump for vinstr client
+ * @cli: pointer to vinstr client
+ * @event_id: id of event that triggered hwcnt dump
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwc_dump(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id);
+
+/**
+ * kbase_vinstr_hwc_clear - performs a reset of the hardware counters for
+ * a given kbase context
+ * @cli: pointer to vinstr client
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli);
+
+/**
+ * kbase_vinstr_try_suspend - try suspending operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * Return: 0 on success, or negative if state change is in progress
+ *
+ * Warning: This API call is non-generic. It is meant to be used only by
+ * the job scheduler state machine.
+ *
+ * The function initiates the vinstr switch to the suspended state. Once it
+ * has been called, vinstr enters the suspending state. If the function
+ * returns a non-zero value, the state switch is not complete and the
+ * function must be called again. On the state switch, vinstr triggers a job
+ * scheduler state machine cycle.
+ */
+int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_suspend - suspends operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * The function initiates the vinstr switch to the suspended state, then
+ * blocks until the operation is complete.
+ */
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_resume - resumes operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * The function can be called only if it was preceded by a successful call
+ * to kbase_vinstr_suspend.
+ */
+void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_dump_size - Return required size of dump buffer
+ * @kbdev: device pointer
+ *
+ * Return: buffer size in bytes
+ */
+size_t kbase_vinstr_dump_size(struct kbase_device *kbdev);
+
+/**
+ * kbase_vinstr_detach_client - Detach a client from the vinstr core
+ * @cli: pointer to vinstr client
+ */
+void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli);
+
+#endif /* _KBASE_VINSTR_H_ */
+
diff --git a/drivers/gpu/arm_gpu/mali_linux_kbase_trace.h b/drivers/gpu/arm_gpu/mali_linux_kbase_trace.h
new file mode 100644
index 000000000000..5d6b4021d626
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_linux_kbase_trace.h
@@ -0,0 +1,201 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_KBASE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mali_slot_template,
+ TP_PROTO(int jobslot, unsigned int info_val),
+ TP_ARGS(jobslot, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, jobslot)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->jobslot = jobslot;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val)
+);
+
+#define DEFINE_MALI_SLOT_EVENT(name) \
+DEFINE_EVENT(mali_slot_template, mali_##name, \
+ TP_PROTO(int jobslot, unsigned int info_val), \
+ TP_ARGS(jobslot, info_val))
+DEFINE_MALI_SLOT_EVENT(JM_SUBMIT);
+DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE);
+DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT);
+DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT);
+DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ);
+#undef DEFINE_MALI_SLOT_EVENT
+
+DECLARE_EVENT_CLASS(mali_refcount_template,
+ TP_PROTO(int refcount, unsigned int info_val),
+ TP_ARGS(refcount, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, refcount)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->refcount = refcount;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val)
+);
+
+#define DEFINE_MALI_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(mali_refcount_template, mali_##name, \
+ TP_PROTO(int refcount, unsigned int info_val), \
+ TP_ARGS(refcount, info_val))
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK);
+DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE);
+#undef DEFINE_MALI_REFCOUNT_EVENT
+
+DECLARE_EVENT_CLASS(mali_add_template,
+ TP_PROTO(int gpu_addr, unsigned int info_val),
+ TP_ARGS(gpu_addr, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, gpu_addr)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->gpu_addr = gpu_addr;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val)
+);
+
+#define DEFINE_MALI_ADD_EVENT(name) \
+DEFINE_EVENT(mali_add_template, mali_##name, \
+ TP_PROTO(int gpu_addr, unsigned int info_val), \
+ TP_ARGS(gpu_addr, info_val))
+DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY);
+DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL);
+DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT);
+DEFINE_MALI_ADD_EVENT(JM_IRQ);
+DEFINE_MALI_ADD_EVENT(JM_IRQ_END);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE);
+DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET);
+DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE);
+DEFINE_MALI_ADD_EVENT(JS_FAST_START_EVICTS_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP);
+DEFINE_MALI_ADD_EVENT(PM_PWRON);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_L2);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_GPU_ON);
+DEFINE_MALI_ADD_EVENT(PM_GPU_OFF);
+DEFINE_MALI_ADD_EVENT(PM_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM);
+DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS);
+#undef DEFINE_MALI_ADD_EVENT
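+
+/*
+ * Illustrative usage sketch (variable names are hypothetical): every
+ * DEFINE_MALI_ADD_EVENT(name) above expands to a tracepoint with the
+ * mali_add_template signature, emitted as trace_mali_<name>(), e.g.
+ *
+ *	trace_mali_PM_GPU_ON(gpu_addr, info_val);
+ *	trace_mali_CORE_GPU_IRQ(gpu_addr, irq_status);
+ */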
+
+#endif /* _TRACE_MALI_KBASE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm_gpu/mali_linux_trace.h b/drivers/gpu/arm_gpu/mali_linux_trace.h
new file mode 100644
index 000000000000..2be06a552768
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_linux_trace.h
@@ -0,0 +1,189 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+#include <linux/tracepoint.h>
+
+#define MALI_JOB_SLOTS_EVENT_CHANGED
+
+/**
+ * mali_job_slots_event - called from mali_kbase_core_linux.c
+ * @event_id: ORed together bitfields representing a type of event, made with the GATOR_MAKE_EVENT() macro.
+ */
+TRACE_EVENT(mali_job_slots_event,
+ TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid,
+ unsigned char job_id),
+ TP_ARGS(event_id, tgid, pid, job_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned int, tgid)
+ __field(unsigned int, pid)
+ __field(unsigned char, job_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->tgid = tgid;
+ __entry->pid = pid;
+ __entry->job_id = job_id;
+ ),
+ TP_printk("event=%u tgid=%u pid=%u job_id=%u",
+ __entry->event_id, __entry->tgid, __entry->pid, __entry->job_id)
+);
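+
+/*
+ * Illustrative call sketch, assuming the GATOR_MAKE_EVENT() helper and the
+ * GATOR_JOB_SLOT_START event type provided elsewhere in the driver; kctx
+ * and js are hypothetical locals (context pointer and job slot index):
+ *
+ *	trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
+ *				   kctx ? kctx->tgid : 0,
+ *				   kctx ? kctx->pid : 0, 0);
+ */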
+
+/**
+ * mali_pm_status - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the power status of the cores (1 = ON, 0 = OFF)
+ */
+TRACE_EVENT(mali_pm_status,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_on - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the cores to power up
+ */
+TRACE_EVENT(mali_pm_power_on,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_off - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the cores to power down
+ */
+TRACE_EVENT(mali_pm_power_off,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_page_fault_insert_pages - Called by page_fault_worker().
+ * It reports an MMU page fault that resulted in new pages being mapped.
+ * @event_id: MMU address space number.
+ * @value: number of newly allocated pages
+ */
+TRACE_EVENT(mali_page_fault_insert_pages,
+ TP_PROTO(int event_id, unsigned long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ __field(unsigned long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %d = %lu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_mmu_as_in_use - Called by assign_and_activate_kctx_addr_space().
+ * It reports that a certain MMU address space is now in use.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_in_use,
+ TP_PROTO(int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_mmu_as_released - Called by kbasep_js_runpool_release_ctx_internal().
+ * It reports that a certain MMU address space has now been released.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_released,
+ TP_PROTO(int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_total_alloc_pages_change - Called by kbase_atomic_add_pages()
+ * and by kbase_atomic_sub_pages().
+ * It reports that the total number of allocated pages has changed.
+ * @event_id: number of pages to be added or subtracted (according to the sign).
+ */
+TRACE_EVENT(mali_total_alloc_pages_change,
+ TP_PROTO(long long int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(long long int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%lld", __entry->event_id)
+);
+
+#endif /* _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm_gpu/mali_malisw.h b/drivers/gpu/arm_gpu/mali_malisw.h
new file mode 100644
index 000000000000..99452933eab4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_malisw.h
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Kernel-wide include for common macros and types.
+ */
+
+#ifndef _MALISW_H_
+#define _MALISW_H_
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+#define SIZE_MAX (~(size_t)0)
+#endif /* LINUX_VERSION_CODE */
+
+/**
+ * MIN - Return the lesser of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * Refer to MAX macro for more details
+ */
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+/**
+ * MAX - Return the greater of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * If called on the same two arguments as MIN it is guaranteed to return
+ * the one that MIN didn't return. This is significant for types where not
+ * all values are comparable e.g. NaNs in floating-point types. But if you want
+ * to retrieve the min and max of two values, consider using a conditional swap
+ * instead.
+ */
+#define MAX(x, y) ((x) < (y) ? (y) : (x))
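+
+/*
+ * Minimal sketch of the conditional swap mentioned above: ordering two
+ * values with one comparison avoids evaluating MIN() and MAX() separately
+ * (and re-evaluating their arguments):
+ *
+ *	if (b < a) {
+ *		int tmp = a;
+ *
+ *		a = b;
+ *		b = tmp;
+ *	}
+ *
+ * after which a holds the minimum and b the maximum.
+ */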
+
+/**
+ * @hideinitializer
+ * Function-like macro for suppressing unused variable warnings. Where possible
+ * such variables should be removed; this macro is present for cases where we
+ * must support API backwards compatibility.
+ */
+#define CSTD_UNUSED(x) ((void)(x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for use where "no behavior" is desired. This is useful
+ * when compile-time macros turn a function-like macro into a no-op, but
+ * where having no statement is otherwise invalid.
+ */
+#define CSTD_NOP(...) ((void)#__VA_ARGS__)
+
+/**
+ * Function-like macro for converting a pointer into a u64 for storing into
+ * an external data structure. This is commonly used when pairing a 32-bit
+ * CPU with a 64-bit peripheral, such as a Midgard GPU. C's type promotion
+ * is complex and a straight cast does not work reliably as pointers are
+ * often considered as signed.
+ */
+#define PTR_TO_U64(x) ((uint64_t)((uintptr_t)(x)))
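+
+/*
+ * Usage sketch (hypothetical names): store a CPU pointer in a 64-bit field
+ * shared with the GPU without sign-extension surprises:
+ *
+ *	desc->cpu_va = PTR_TO_U64(vaddr);
+ *
+ * A plain (u64) cast of a 32-bit pointer may sign-extend when the pointer
+ * is treated as signed during promotion; casting through uintptr_t first
+ * keeps the value zero-extended.
+ */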
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a single level macro.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR1( MY_MACRO )
+ * > "MY_MACRO"
+ * @endcode
+ */
+#define CSTD_STR1(x) #x
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a macro's value. This should not be used
+ * if the macro is defined in a way which may have no value; the
+ * alternative @c CSTD_STR2N macro should be used instead.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR2( MY_MACRO )
+ * > "32"
+ * @endcode
+ */
+#define CSTD_STR2(x) CSTD_STR1(x)
+
+/**
+ * Specify an assertion value which is evaluated at compile time. Recommended
+ * usage is specification of a @c static @c INLINE function containing all of
+ * the assertions thus:
+ *
+ * @code
+ * static INLINE void [module]_compile_time_assertions( void )
+ * {
+ * COMPILE_TIME_ASSERT( sizeof(uintptr_t) == sizeof(intptr_t) );
+ * }
+ * @endcode
+ *
+ * @note Use @c static not @c STATIC. We never want to turn off this @c static
+ * specification for testing purposes.
+ */
+#define CSTD_COMPILE_TIME_ASSERT(expr) \
+ do { switch (0) { case 0: case (expr):; } } while (false)
+
+#endif /* _MALISW_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_midg_coherency.h b/drivers/gpu/arm_gpu/mali_midg_coherency.h
new file mode 100644
index 000000000000..a509cbd5f175
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_midg_coherency.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _MIDG_COHERENCY_H_
+#define _MIDG_COHERENCY_H_
+
+#define COHERENCY_ACE_LITE 0
+#define COHERENCY_ACE 1
+#define COHERENCY_NONE 31
+#define COHERENCY_FEATURE_BIT(x) (1 << (x))
+
+#endif /* _MIDG_COHERENCY_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_midg_regmap.h b/drivers/gpu/arm_gpu/mali_midg_regmap.h
new file mode 100644
index 000000000000..7d7b7bcd3cc3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_midg_regmap.h
@@ -0,0 +1,611 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _MIDGARD_REGMAP_H_
+#define _MIDGARD_REGMAP_H_
+
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Begin Register Offsets
+ */
+
+#define GPU_CONTROL_BASE 0x0000
+#define GPU_CONTROL_REG(r) (GPU_CONTROL_BASE + (r))
+#define GPU_ID 0x000 /* (RO) GPU and revision identifier */
+#define L2_FEATURES 0x004 /* (RO) Level 2 cache features */
+#define SUSPEND_SIZE 0x008 /* (RO) Fixed-function suspend buffer
+ size */
+#define TILER_FEATURES 0x00C /* (RO) Tiler Features */
+#define MEM_FEATURES 0x010 /* (RO) Memory system features */
+#define MMU_FEATURES 0x014 /* (RO) MMU features */
+#define AS_PRESENT 0x018 /* (RO) Address space slots present */
+#define JS_PRESENT 0x01C /* (RO) Job slots present */
+#define GPU_IRQ_RAWSTAT 0x020 /* (RW) */
+#define GPU_IRQ_CLEAR 0x024 /* (WO) */
+#define GPU_IRQ_MASK 0x028 /* (RW) */
+#define GPU_IRQ_STATUS 0x02C /* (RO) */
+
+/* IRQ flags */
+#define GPU_FAULT (1 << 0) /* A GPU Fault has occurred */
+#define MULTIPLE_GPU_FAULTS (1 << 7) /* More than one GPU Fault occurred. */
+#define RESET_COMPLETED (1 << 8) /* Set when a reset has completed. Intended for use with SOFT_RESET
+ commands which may take time. */
+#define POWER_CHANGED_SINGLE (1 << 9) /* Set when a single core has finished powering up or down. */
+#define POWER_CHANGED_ALL (1 << 10) /* Set when all cores have finished powering up or down
+ and the power manager is idle. */
+
+#define PRFCNT_SAMPLE_COMPLETED (1 << 16) /* Set when a performance count sample has completed. */
+#define CLEAN_CACHES_COMPLETED (1 << 17) /* Set when a cache clean operation has completed. */
+
+#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \
+ | POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED)
+
+#define GPU_COMMAND 0x030 /* (WO) */
+#define GPU_STATUS 0x034 /* (RO) */
+#define LATEST_FLUSH 0x038 /* (RO) */
+
+#define GROUPS_L2_COHERENT (1 << 0) /* Core groups are L2 coherent */
+#define GPU_DBGEN (1 << 8) /* DBGEN wire status */
+
+#define GPU_FAULTSTATUS 0x03C /* (RO) GPU exception type and fault status */
+#define GPU_FAULTADDRESS_LO 0x040 /* (RO) GPU exception fault address, low word */
+#define GPU_FAULTADDRESS_HI 0x044 /* (RO) GPU exception fault address, high word */
+
+#define PWR_KEY 0x050 /* (WO) Power manager key register */
+#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */
+#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */
+
+#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory region base address, low word */
+#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory region base address, high word */
+#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter configuration */
+#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable flags for Job Manager */
+#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable flags for shader cores */
+#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable flags for tiler */
+#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable flags for MMU/L2 cache */
+
+#define CYCLE_COUNT_LO 0x090 /* (RO) Cycle counter, low word */
+#define CYCLE_COUNT_HI 0x094 /* (RO) Cycle counter, high word */
+#define TIMESTAMP_LO 0x098 /* (RO) Global time stamp counter, low word */
+#define TIMESTAMP_HI 0x09C /* (RO) Global time stamp counter, high word */
+
+#define THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
+#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
+#define THREAD_FEATURES 0x0AC /* (RO) Thread features */
+
+#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */
+#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */
+#define TEXTURE_FEATURES_2 0x0B8 /* (RO) Support flags for indexed texture formats 64..95 */
+
+#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2))
+
+#define JS0_FEATURES 0x0C0 /* (RO) Features of job slot 0 */
+#define JS1_FEATURES 0x0C4 /* (RO) Features of job slot 1 */
+#define JS2_FEATURES 0x0C8 /* (RO) Features of job slot 2 */
+#define JS3_FEATURES 0x0CC /* (RO) Features of job slot 3 */
+#define JS4_FEATURES 0x0D0 /* (RO) Features of job slot 4 */
+#define JS5_FEATURES 0x0D4 /* (RO) Features of job slot 5 */
+#define JS6_FEATURES 0x0D8 /* (RO) Features of job slot 6 */
+#define JS7_FEATURES 0x0DC /* (RO) Features of job slot 7 */
+#define JS8_FEATURES 0x0E0 /* (RO) Features of job slot 8 */
+#define JS9_FEATURES 0x0E4 /* (RO) Features of job slot 9 */
+#define JS10_FEATURES 0x0E8 /* (RO) Features of job slot 10 */
+#define JS11_FEATURES 0x0EC /* (RO) Features of job slot 11 */
+#define JS12_FEATURES 0x0F0 /* (RO) Features of job slot 12 */
+#define JS13_FEATURES 0x0F4 /* (RO) Features of job slot 13 */
+#define JS14_FEATURES 0x0F8 /* (RO) Features of job slot 14 */
+#define JS15_FEATURES 0x0FC /* (RO) Features of job slot 15 */
+
+#define JS_FEATURES_REG(n) GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2))
+
+#define SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
+
+#define TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */
+
+#define L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */
+
+#define STACK_PRESENT_LO 0xE00 /* (RO) Core stack present bitmap, low word */
+#define STACK_PRESENT_HI 0xE04 /* (RO) Core stack present bitmap, high word */
+
+
+#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO 0xE10 /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI 0xE14 /* (RO) Core stack ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO 0xE20 /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI 0xE24 /* (RO) Core stack power on bitmap, high word */
+
+
+#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO 0xE30 /* (RO) Core stack power off bitmap, low word */
+#define STACK_PRWOFF_HI 0xE34 /* (RO) Core stack power off bitmap, high word */
+
+
+#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO 0xE40 /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PRWTRANS_HI 0xE44 /* (RO) Core stack power transition bitmap, high word */
+
+
+#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */
+
+#define COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */
+#define COHERENCY_ENABLE 0x304 /* (RW) Coherency enable */
+
+#define JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */
+#define SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */
+#define TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+#define JOB_CONTROL_BASE 0x1000
+
+#define JOB_CONTROL_REG(r) (JOB_CONTROL_BASE + (r))
+
+#define JOB_IRQ_RAWSTAT 0x000 /* Raw interrupt status register */
+#define JOB_IRQ_CLEAR 0x004 /* Interrupt clear register */
+#define JOB_IRQ_MASK 0x008 /* Interrupt mask register */
+#define JOB_IRQ_STATUS 0x00C /* Interrupt status register */
+#define JOB_IRQ_JS_STATE 0x010 /* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */
+#define JOB_IRQ_THROTTLE 0x014 /* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt. */
+
+#define JOB_SLOT0 0x800 /* Configuration registers for job slot 0 */
+#define JOB_SLOT1 0x880 /* Configuration registers for job slot 1 */
+#define JOB_SLOT2 0x900 /* Configuration registers for job slot 2 */
+#define JOB_SLOT3 0x980 /* Configuration registers for job slot 3 */
+#define JOB_SLOT4 0xA00 /* Configuration registers for job slot 4 */
+#define JOB_SLOT5 0xA80 /* Configuration registers for job slot 5 */
+#define JOB_SLOT6 0xB00 /* Configuration registers for job slot 6 */
+#define JOB_SLOT7 0xB80 /* Configuration registers for job slot 7 */
+#define JOB_SLOT8 0xC00 /* Configuration registers for job slot 8 */
+#define JOB_SLOT9 0xC80 /* Configuration registers for job slot 9 */
+#define JOB_SLOT10 0xD00 /* Configuration registers for job slot 10 */
+#define JOB_SLOT11 0xD80 /* Configuration registers for job slot 11 */
+#define JOB_SLOT12 0xE00 /* Configuration registers for job slot 12 */
+#define JOB_SLOT13 0xE80 /* Configuration registers for job slot 13 */
+#define JOB_SLOT14 0xF00 /* Configuration registers for job slot 14 */
+#define JOB_SLOT15 0xF80 /* Configuration registers for job slot 15 */
+
+#define JOB_SLOT_REG(n, r) (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r))
+
+#define JS_HEAD_LO 0x00 /* (RO) Job queue head pointer for job slot n, low word */
+#define JS_HEAD_HI 0x04 /* (RO) Job queue head pointer for job slot n, high word */
+#define JS_TAIL_LO 0x08 /* (RO) Job queue tail pointer for job slot n, low word */
+#define JS_TAIL_HI 0x0C /* (RO) Job queue tail pointer for job slot n, high word */
+#define JS_AFFINITY_LO 0x10 /* (RO) Core affinity mask for job slot n, low word */
+#define JS_AFFINITY_HI 0x14 /* (RO) Core affinity mask for job slot n, high word */
+#define JS_CONFIG 0x18 /* (RO) Configuration settings for job slot n */
+#define JS_XAFFINITY 0x1C /* (RO) Extended affinity mask for job
+ slot n */
+
+#define JS_COMMAND 0x20 /* (WO) Command register for job slot n */
+#define JS_STATUS 0x24 /* (RO) Status register for job slot n */
+
+#define JS_HEAD_NEXT_LO 0x40 /* (RW) Next job queue head pointer for job slot n, low word */
+#define JS_HEAD_NEXT_HI 0x44 /* (RW) Next job queue head pointer for job slot n, high word */
+
+#define JS_AFFINITY_NEXT_LO 0x50 /* (RW) Next core affinity mask for job slot n, low word */
+#define JS_AFFINITY_NEXT_HI 0x54 /* (RW) Next core affinity mask for job slot n, high word */
+#define JS_CONFIG_NEXT 0x58 /* (RW) Next configuration settings for job slot n */
+#define JS_XAFFINITY_NEXT 0x5C /* (RW) Next extended affinity mask for
+ job slot n */
+
+#define JS_COMMAND_NEXT 0x60 /* (RW) Next command register for job slot n */
+
+#define JS_FLUSH_ID_NEXT 0x70 /* (RW) Next job slot n cache flush ID */
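+
+/*
+ * Offset sketch: the JS_* values above are offsets within one job slot's
+ * register block, so e.g. the status register of job slot 2 is
+ *
+ *	JOB_SLOT_REG(2, JS_STATUS)
+ *	    = JOB_CONTROL_BASE + JOB_SLOT0 + (2 << 7) + JS_STATUS
+ *	    = 0x1000 + 0x800 + 0x100 + 0x24 = 0x1924
+ */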
+
+#define MEMORY_MANAGEMENT_BASE 0x2000
+#define MMU_REG(r) (MEMORY_MANAGEMENT_BASE + (r))
+
+#define MMU_IRQ_RAWSTAT 0x000 /* (RW) Raw interrupt status register */
+#define MMU_IRQ_CLEAR 0x004 /* (WO) Interrupt clear register */
+#define MMU_IRQ_MASK 0x008 /* (RW) Interrupt mask register */
+#define MMU_IRQ_STATUS 0x00C /* (RO) Interrupt status register */
+
+#define MMU_AS0 0x400 /* Configuration registers for address space 0 */
+#define MMU_AS1 0x440 /* Configuration registers for address space 1 */
+#define MMU_AS2 0x480 /* Configuration registers for address space 2 */
+#define MMU_AS3 0x4C0 /* Configuration registers for address space 3 */
+#define MMU_AS4 0x500 /* Configuration registers for address space 4 */
+#define MMU_AS5 0x540 /* Configuration registers for address space 5 */
+#define MMU_AS6 0x580 /* Configuration registers for address space 6 */
+#define MMU_AS7 0x5C0 /* Configuration registers for address space 7 */
+#define MMU_AS8 0x600 /* Configuration registers for address space 8 */
+#define MMU_AS9 0x640 /* Configuration registers for address space 9 */
+#define MMU_AS10 0x680 /* Configuration registers for address space 10 */
+#define MMU_AS11 0x6C0 /* Configuration registers for address space 11 */
+#define MMU_AS12 0x700 /* Configuration registers for address space 12 */
+#define MMU_AS13 0x740 /* Configuration registers for address space 13 */
+#define MMU_AS14 0x780 /* Configuration registers for address space 14 */
+#define MMU_AS15 0x7C0 /* Configuration registers for address space 15 */
+
+#define MMU_AS_REG(n, r) (MMU_REG(MMU_AS0 + ((n) << 6)) + (r))
+
+#define AS_TRANSTAB_LO 0x00 /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI 0x04 /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO 0x08 /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI 0x0C /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO 0x10 /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI 0x14 /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND 0x18 /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS 0x1C /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO 0x20 /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI 0x24 /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS 0x28 /* (RO) Status flags for address space n */
+
+
+/* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_LO 0x30
+/* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_HI 0x34
+/* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_LO 0x38
+/* (RO) Secondary fault address for address space n, high word */
+#define AS_FAULTEXTRA_HI 0x3C
+
+/* End Register Offsets */
+
+/*
+ * MMU_IRQ_RAWSTAT register values. Values are valid also for
+ MMU_IRQ_CLEAR, MMU_IRQ_MASK, MMU_IRQ_STATUS registers.
+ */
+
+#define MMU_PAGE_FAULT_FLAGS 16
+
+/* Macros returning a bitmask to retrieve page fault or bus error flags from
+ * MMU registers */
+#define MMU_PAGE_FAULT(n) (1UL << (n))
+#define MMU_BUS_ERROR(n) (1UL << ((n) + MMU_PAGE_FAULT_FLAGS))
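+
+/*
+ * Decode sketch (status is a hypothetical value read from MMU_IRQ_STATUS):
+ *
+ *	if (status & MMU_PAGE_FAULT(n))	page fault on address space n (bits 0..15)
+ *	if (status & MMU_BUS_ERROR(n))	bus error on address space n (bits 16..31)
+ */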
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED (0u << 0)
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY (1u << 1)
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE (3u << 0)
+#define AS_TRANSTAB_LPAE_READ_INNER (1u << 2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER (1u << 4)
+
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x00000003
+
+/*
+ * Begin AARCH64 MMU TRANSTAB register values
+ */
+#define MMU_HW_OUTA_BITS 40
+#define AS_TRANSTAB_BASE_MASK ((1ULL << MMU_HW_OUTA_BITS) - (1ULL << 4))
+
+/*
+ * Begin MMU STATUS register values
+ */
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK (0x7<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT (0x0<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT (0x1<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT (0x2<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG (0x3<<3)
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT (0x4<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT (0x5<<3)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3<<8)
+
+/*
+ * Begin MMU TRANSCFG register values
+ */
+
+#define AS_TRANSCFG_ADRMODE_LEGACY 0
+#define AS_TRANSCFG_ADRMODE_UNMAPPED 1
+#define AS_TRANSCFG_ADRMODE_IDENTITY 2
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K 6
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K 8
+
+#define AS_TRANSCFG_ADRMODE_MASK 0xF
+
+
+/*
+ * Begin TRANSCFG register values
+ */
+#define AS_TRANSCFG_PTW_MEMATTR_MASK (3 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_NON_CACHEABLE (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK (2 << 24)
+
+#define AS_TRANSCFG_PTW_SH_MASK ((3 << 28))
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+
+/*
+ * Begin Command Values
+ */
+
+/* JS_COMMAND register commands */
+#define JS_COMMAND_NOP 0x00 /* NOP Operation. Writing this value is ignored */
+#define JS_COMMAND_START 0x01 /* Start processing a job chain. Writing this value is ignored */
+#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_COMMAND_MASK 0x07 /* Mask of bits currently in use by the HW */
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP 0x00 /* NOP Operation */
+#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs
+ (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1 caches, then
+ flush all L2 caches then issue a flush region command to all MMUs */
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_NO_ACTION (0u << 0)
+#define JS_CONFIG_START_FLUSH_CLEAN (1u << 8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU (1u << 10)
+#define JS_CONFIG_JOB_CHAIN_FLAG (1u << 11)
+#define JS_CONFIG_END_FLUSH_NO_ACTION JS_CONFIG_START_FLUSH_NO_ACTION
+#define JS_CONFIG_END_FLUSH_CLEAN (1u << 12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION (1u << 14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK (1u << 15)
+#define JS_CONFIG_THREAD_PRI(n) ((n) << 16)
+
+/* JS_XAFFINITY register values */
+#define JS_XAFFINITY_XAFFINITY_ENABLE (1u << 0)
+#define JS_XAFFINITY_TILER_ENABLE (1u << 8)
+#define JS_XAFFINITY_CACHE_ENABLE (1u << 16)
+
+/* JS_STATUS register values */
+
+/* NOTE: Please keep these values in sync with enum base_jd_event_code in mali_base_kernel.h.
+ * The values are separated to avoid dependency of userspace and kernel code.
+ */
+
+/* Group of values representing the job status instead of a particular fault */
+#define JS_STATUS_NO_EXCEPTION_BASE 0x00
+#define JS_STATUS_INTERRUPTED (JS_STATUS_NO_EXCEPTION_BASE + 0x02) /* 0x02 means INTERRUPTED */
+#define JS_STATUS_STOPPED (JS_STATUS_NO_EXCEPTION_BASE + 0x03) /* 0x03 means STOPPED */
+#define JS_STATUS_TERMINATED (JS_STATUS_NO_EXCEPTION_BASE + 0x04) /* 0x04 means TERMINATED */
+
+/* General fault values */
+#define JS_STATUS_FAULT_BASE 0x40
+#define JS_STATUS_CONFIG_FAULT (JS_STATUS_FAULT_BASE) /* 0x40 means CONFIG FAULT */
+#define JS_STATUS_POWER_FAULT (JS_STATUS_FAULT_BASE + 0x01) /* 0x41 means POWER FAULT */
+#define JS_STATUS_READ_FAULT (JS_STATUS_FAULT_BASE + 0x02) /* 0x42 means READ FAULT */
+#define JS_STATUS_WRITE_FAULT (JS_STATUS_FAULT_BASE + 0x03) /* 0x43 means WRITE FAULT */
+#define JS_STATUS_AFFINITY_FAULT (JS_STATUS_FAULT_BASE + 0x04) /* 0x44 means AFFINITY FAULT */
+#define JS_STATUS_BUS_FAULT (JS_STATUS_FAULT_BASE + 0x08) /* 0x48 means BUS FAULT */
+
+/* Instruction or data faults */
+#define JS_STATUS_INSTRUCTION_FAULT_BASE 0x50
+#define JS_STATUS_INSTR_INVALID_PC (JS_STATUS_INSTRUCTION_FAULT_BASE) /* 0x50 means INSTR INVALID PC */
+#define JS_STATUS_INSTR_INVALID_ENC (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01) /* 0x51 means INSTR INVALID ENC */
+#define JS_STATUS_INSTR_TYPE_MISMATCH (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02) /* 0x52 means INSTR TYPE MISMATCH */
+#define JS_STATUS_INSTR_OPERAND_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03) /* 0x53 means INSTR OPERAND FAULT */
+#define JS_STATUS_INSTR_TLS_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04) /* 0x54 means INSTR TLS FAULT */
+#define JS_STATUS_INSTR_BARRIER_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05) /* 0x55 means INSTR BARRIER FAULT */
+#define JS_STATUS_INSTR_ALIGN_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06) /* 0x56 means INSTR ALIGN FAULT */
+/* NOTE: No fault with 0x57 code defined in spec. */
+#define JS_STATUS_DATA_INVALID_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08) /* 0x58 means DATA INVALID FAULT */
+#define JS_STATUS_TILE_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09) /* 0x59 means TILE RANGE FAULT */
+#define JS_STATUS_ADDRESS_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A) /* 0x5A means ADDRESS RANGE FAULT */
+
+/* Other faults */
+#define JS_STATUS_MEMORY_FAULT_BASE 0x60
+#define JS_STATUS_OUT_OF_MEMORY (JS_STATUS_MEMORY_FAULT_BASE) /* 0x60 means OUT OF MEMORY */
+#define JS_STATUS_UNKNOWN 0x7F /* 0x7F means UNKNOWN */
+
+/* GPU_COMMAND values */
+#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */
+#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */
+#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */
+#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. */
+#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */
+#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */
+#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */
+#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
+
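+/*
+ * Command sequence sketch, assuming gpu_base is an ioremap()'d pointer to
+ * this register region (the real driver goes through its own register
+ * accessors):
+ *
+ *	writel(RESET_COMPLETED, gpu_base + GPU_CONTROL_REG(GPU_IRQ_CLEAR));
+ *	writel(GPU_COMMAND_SOFT_RESET, gpu_base + GPU_CONTROL_REG(GPU_COMMAND));
+ *
+ * then poll GPU_IRQ_RAWSTAT until RESET_COMPLETED is set.
+ */
+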
+/* End Command Values */
+
+/* GPU_STATUS values */
+#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */
+#define GPU_STATUS_PROTECTED_MODE_ACTIVE (1 << 7) /* Set if protected mode is active */
+
+/* PRFCNT_CONFIG register values */
+#define PRFCNT_CONFIG_MODE_SHIFT 0 /* Counter mode position. */
+#define PRFCNT_CONFIG_AS_SHIFT 4 /* Address space bitmap position. */
+#define PRFCNT_CONFIG_SETSELECT_SHIFT 8 /* Set select position. */
+
+#define PRFCNT_CONFIG_MODE_OFF 0 /* The performance counters are disabled. */
+#define PRFCNT_CONFIG_MODE_MANUAL 1 /* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */
+#define PRFCNT_CONFIG_MODE_TILE 2 /* The performance counters are enabled, and are written out each time a tile finishes rendering. */
+
+/* AS<n>_MEMATTR values: */
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_FORCE_TO_CACHE_ALL 0x8Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_WRITE_ALLOC 0x8Dull
+
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull
+
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL 0x4Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_LPAE_WRITE_ALLOC 0x4Dull
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_WA 0x8Dull
+
+/* Symbol for default MEMATTR to use */
+
+/* Default is - HW implementation defined caching */
+#define AS_MEMATTR_INDEX_DEFAULT 0
+#define AS_MEMATTR_INDEX_DEFAULT_ACE 3
+
+/* HW implementation defined caching */
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+/* Force cache on */
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1
+/* Write-alloc */
+#define AS_MEMATTR_INDEX_WRITE_ALLOC 2
+/* Outer coherent, inner implementation defined policy */
+#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3
+/* Outer coherent, write alloc inner */
+#define AS_MEMATTR_INDEX_OUTER_WA 4
+
+/* JS<n>_FEATURES register */
+
+#define JS_FEATURE_NULL_JOB (1u << 1)
+#define JS_FEATURE_SET_VALUE_JOB (1u << 2)
+#define JS_FEATURE_CACHE_FLUSH_JOB (1u << 3)
+#define JS_FEATURE_COMPUTE_JOB (1u << 4)
+#define JS_FEATURE_VERTEX_JOB (1u << 5)
+#define JS_FEATURE_GEOMETRY_JOB (1u << 6)
+#define JS_FEATURE_TILER_JOB (1u << 7)
+#define JS_FEATURE_FUSED_JOB (1u << 8)
+#define JS_FEATURE_FRAGMENT_JOB (1u << 9)
+
+/* End JS<n>_FEATURES register */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT (23)
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT (24)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT (26)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+/* End L2_MMU_CONFIG register */
+
+/* THREAD_* registers */
+
+/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */
+#define IMPLEMENTATION_UNSPECIFIED 0
+#define IMPLEMENTATION_SILICON 1
+#define IMPLEMENTATION_FPGA 2
+#define IMPLEMENTATION_MODEL 3
+
+/* Default values when registers are not supported by the implemented hardware */
+#define THREAD_MT_DEFAULT 256
+#define THREAD_MWS_DEFAULT 256
+#define THREAD_MBS_DEFAULT 256
+#define THREAD_MR_DEFAULT 1024
+#define THREAD_MTQ_DEFAULT 4
+#define THREAD_MTGS_DEFAULT 10
+
+/* End THREAD_* registers */
+
+/* SHADER_CONFIG register */
+
+#define SC_ALT_COUNTERS (1ul << 3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL (1ul << 4)
+#define SC_SDC_DISABLE_OQ_DISCARD (1ul << 6)
+#define SC_LS_ALLOW_ATTR_TYPES (1ul << 16)
+#define SC_LS_PAUSEBUFFER_DISABLE (1ul << 16)
+#define SC_LS_ATTR_CHECK_DISABLE (1ul << 18)
+#define SC_ENABLE_TEXGRD_FLAGS (1ul << 25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+
+#define TC_CLOCK_GATE_OVERRIDE (1ul << 0)
+
+/* End TILER_CONFIG register */
+
+/* JM_CONFIG register */
+
+#define JM_TIMESTAMP_OVERRIDE (1ul << 0)
+#define JM_CLOCK_GATE_OVERRIDE (1ul << 1)
+#define JM_JOB_THROTTLE_ENABLE (1ul << 2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT (3)
+#define JM_MAX_JOB_THROTTLE_LIMIT (0x3F)
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT (2)
+#define JM_IDVS_GROUP_SIZE_SHIFT (16)
+#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
+/* End JM_CONFIG register */
+
+
+#endif /* _MIDGARD_REGMAP_H_ */
diff --git a/drivers/gpu/arm_gpu/mali_timeline.h b/drivers/gpu/arm_gpu/mali_timeline.h
new file mode 100644
index 000000000000..bd5f6614b6bb
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_timeline.h
@@ -0,0 +1,396 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali_timeline
+
+#if !defined(_MALI_TIMELINE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MALI_TIMELINE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mali_timeline_atoms_in_flight,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int tgid,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ tgid,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, tgid)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->tgid = tgid;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i", CTX_SET_NR_ATOMS_IN_FLIGHT,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->count)
+);
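+
+/*
+ * Emission sketch (kctx and atom_count are assumed locals): ts_sec/ts_nsec
+ * is simply a split timestamp, e.g. taken from the raw monotonic clock:
+ *
+ *	struct timespec ts;
+ *
+ *	getrawmonotonic(&ts);
+ *	trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec,
+ *					    kctx->tgid, atom_count);
+ */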
+
+
+TRACE_EVENT(mali_timeline_atom,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int atom_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ atom_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, atom_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->atom_id = atom_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->atom_id,
+ __entry->atom_id)
+);
+
+TRACE_EVENT(mali_timeline_gpu_slot_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->count)
+);
+
+TRACE_EVENT(mali_timeline_gpu_slot_action,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->count)
+);
+
+TRACE_EVENT(mali_timeline_gpu_power_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int active),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ active),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, active)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->active = active;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->active)
+
+);
+
+TRACE_EVENT(mali_timeline_l2_power_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int state),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ state),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->state = state;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->state)
+
+);
+TRACE_EVENT(mali_timeline_pm_event,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int pm_event_type,
+ unsigned int pm_event_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ pm_event_type,
+ pm_event_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, pm_event_type)
+ __field(unsigned int, pm_event_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->pm_event_type = pm_event_type;
+ __entry->pm_event_id = pm_event_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i,%u", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->pm_event_type, __entry->pm_event_id)
+
+);
+
+TRACE_EVENT(mali_timeline_slot_atom,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int atom_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ atom_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, atom_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->atom_id = atom_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->atom_id)
+);
+
+TRACE_EVENT(mali_timeline_pm_checktrans,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int trans_code,
+ int trans_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ trans_code,
+ trans_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, trans_code)
+ __field(int, trans_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->trans_code = trans_code;
+ __entry->trans_id = trans_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->trans_code,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->trans_id)
+
+);
+
+TRACE_EVENT(mali_timeline_context_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->count)
+);
+
+#endif /* _MALI_TIMELINE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/drivers/gpu/arm_gpu/mali_uk.h b/drivers/gpu/arm_gpu/mali_uk.h
new file mode 100644
index 000000000000..841d03fb5873
--- /dev/null
+++ b/drivers/gpu/arm_gpu/mali_uk.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_uk.h
+ * Types and definitions that are common across OSs for both the user
+ * and kernel side of the User-Kernel interface.
+ */
+
+#ifndef _UK_H_
+#define _UK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @defgroup uk_api User-Kernel Interface API
+ *
+ * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device
+ * drivers developed as part of the Midgard DDK. Currently that includes the Base driver and the UMP driver.
+ *
+ * It exposes an OS-independent API to user-side code (UKU) which routes function calls to an OS-independent
+ * kernel-side API (UKK) via an OS-specific communication mechanism.
+ *
+ * This API is internal to the Midgard DDK and is not exposed to any applications.
+ *
+ * @{
+ */
+
+/**
+ * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The
+ * UK module maps this to an OS specific device name, e.g. "gpu_base" -> "GPU0:". Pass this
+ * identifier to the uku_open() function to select a UKK client.
+ *
+ * When a new UKK client driver is created a new identifier needs to be added to the uk_client_id
+ * enumeration and the uku_open() implementations for the various OS ports need to be updated to
+ * provide a mapping of the identifier to the OS specific device name.
+ *
+ */
+enum uk_client_id {
+ /**
+ * Value used to identify the Base driver UK client.
+ */
+ UK_CLIENT_MALI_T600_BASE,
+
+ /** The number of uk clients supported. This must be the last member of the enum */
+ UK_CLIENT_COUNT
+};
+
+/**
+ * Each function callable through the UK interface has a unique number.
+ * Functions provided by UK clients start from number UK_FUNC_ID.
+ * Numbers below UK_FUNC_ID are used for internal UK functions.
+ */
+enum uk_func {
+ UKP_FUNC_ID_CHECK_VERSION, /**< UKK Core internal function */
+ /**
+ * Each UK client numbers the functions they provide starting from
+ * number UK_FUNC_ID. This number is then eventually assigned to the
+ * id field of the union uk_header structure when preparing to make a
+ * UK call. See your UK client for a list of their function numbers.
+ */
+ UK_FUNC_ID = 512
+};
+
+/**
+ * Arguments for a UK call are stored in a structure. This structure consists
+ * of a fixed size header and a payload. The header carries a 32-bit number
+ * identifying the UK function to be called (see uk_func). When the UKK client
+ * receives this header and has executed the requested UK function, it will use
+ * the same header to store the result of the function in the form of an
+ * int return code. The size of this structure is such that the
+ * first member of the payload following the header can be accessed efficiently
+ * on a 32 and 64-bit kernel and the structure has the same size regardless
+ * of a 32 or 64-bit kernel. The uk_kernel_size_type type should be defined
+ * accordingly in the OS specific mali_uk_os.h header file.
+ */
+union uk_header {
+ /**
+ * 32-bit number identifying the UK function to be called.
+ * Also see uk_func.
+ */
+ u32 id;
+ /**
+ * The int return code returned by the called UK function.
+ * See the specification of the particular UK function you are
+ * calling for the meaning of the error codes returned. All
+ * UK functions return 0 on success.
+ */
+ u32 ret;
+ /*
+ * Used to ensure 64-bit alignment of this union. Do not remove.
+ * This field is used for padding and does not need to be initialized.
+ */
+ u64 sizer;
+};
+
+/**
+ * This structure carries a 16-bit major and minor number and is sent along with an internal UK call
+ * used during uku_open to identify the versions of the UK module in use by the user-side and kernel-side.
+ */
+struct uku_version_check_args {
+ union uk_header header;
+ /**< UK call header */
+ u16 major;
+ /**< This field carries the user-side major version on input and the kernel-side major version on output */
+ u16 minor;
+ /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */
+ u8 padding[4];
+};
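+
+/*
+ * Usage sketch (user side; ctx, uku_call(), MY_UK_MAJOR and MY_UK_MINOR are
+ * assumptions, not part of this header): the version handshake fills the
+ * header with the internal check-version id and the caller's version, then
+ * reads the kernel-side version back from the same structure:
+ *
+ *	struct uku_version_check_args args = {
+ *		.header.id = UKP_FUNC_ID_CHECK_VERSION,
+ *		.major = MY_UK_MAJOR,
+ *		.minor = MY_UK_MINOR,
+ *	};
+ *
+ *	if (uku_call(ctx, &args, sizeof(args)) == 0 && args.header.ret == 0)
+ *		args.major/args.minor now hold the kernel-side version.
+ */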
+
+/** @} end group uk_api */
+
+/** @} *//* end group base_api */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _UK_H_ */
diff --git a/drivers/gpu/arm_gpu/platform/Kconfig b/drivers/gpu/arm_gpu/platform/Kconfig
new file mode 100644
index 000000000000..c52cb77d0d1d
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/Kconfig
@@ -0,0 +1,25 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+
+# Add your platform specific Kconfig file here
+#
+# "drivers/gpu/arm/midgard/platform/xxx/Kconfig"
+source "drivers/gpu/arm_gpu/platform/hisilicon/Kconfig"
+#
+# Where xxx is the platform name set in MALI_PLATFORM_THIRDPARTY_NAME
+#
+
diff --git a/drivers/gpu/arm_gpu/platform/devicetree/Kbuild b/drivers/gpu/arm_gpu/platform/devicetree/Kbuild
new file mode 100644
index 000000000000..e888a42fc69a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/devicetree/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+mali_kbase-y += \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_config_devicetree.o \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_runtime_pm.o
diff --git a/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_devicetree.c b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_devicetree.c
new file mode 100644
index 000000000000..b2a7c93f12a9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_devicetree.c
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase_config.h>
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &dummy_platform_config;
+}
diff --git a/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_platform.h b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_platform.h
new file mode 100644
index 000000000000..2ceca34945b9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_config_platform.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX (5000)
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN (5000)
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (NULL)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Autosuspend delay
+ *
+ * The delay time (in milliseconds) to be used for autosuspend
+ */
+#define AUTO_SUSPEND_DELAY (100)
diff --git a/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_runtime_pm.c b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_runtime_pm.c
new file mode 100644
index 000000000000..50cfb2c8b020
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/devicetree/mali_kbase_runtime_pm.c
@@ -0,0 +1,122 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include "mali_kbase_config_platform.h"
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ int ret = 1; /* Assume GPU has been powered off */
+ int error;
+
+ dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
+ (void *)kbdev->dev->pm_domain);
+
+ error = pm_runtime_get_sync(kbdev->dev);
+ if (error == 1) {
+ /*
+ * Let core know that the chip has not been
+ * powered off, so we can save on re-initialization.
+ */
+ ret = 0;
+ }
+
+ dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
+
+ return ret;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+
+ pm_runtime_mark_last_busy(kbdev->dev);
+ pm_runtime_put_autosuspend(kbdev->dev);
+}
+
+int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+ int ret = 0;
+
+ dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+
+ pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(kbdev->dev);
+
+ pm_runtime_set_active(kbdev->dev);
+ pm_runtime_enable(kbdev->dev);
+
+ if (!pm_runtime_enabled(kbdev->dev)) {
+ dev_warn(kbdev->dev, "pm_runtime not enabled");
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+ pm_runtime_disable(kbdev->dev);
+}
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+
+ return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+ int ret = pm_callback_runtime_on(kbdev);
+
+ WARN_ON(ret);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+ pm_callback_runtime_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = pm_callback_suspend,
+ .power_resume_callback = pm_callback_resume,
+#ifdef KBASE_PM_RUNTIME
+ .power_runtime_init_callback = kbase_device_runtime_init,
+ .power_runtime_term_callback = kbase_device_runtime_disable,
+ .power_runtime_on_callback = pm_callback_runtime_on,
+ .power_runtime_off_callback = pm_callback_runtime_off,
+#else /* KBASE_PM_RUNTIME */
+ .power_runtime_init_callback = NULL,
+ .power_runtime_term_callback = NULL,
+ .power_runtime_on_callback = NULL,
+ .power_runtime_off_callback = NULL,
+#endif /* KBASE_PM_RUNTIME */
+};
+
+
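Note (not part of the patch): the devicetree callbacks above rely entirely on the runtime-PM autosuspend machinery, so power_off never touches the hardware directly; it only marks the device busy and lets the 100 ms autosuspend timer (AUTO_SUSPEND_DELAY) decide when to really power down. A minimal standalone sketch of the same pattern, using a generic struct device and only standard runtime-PM calls:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static void example_runtime_pm_init(struct device *dev)
    {
        pm_runtime_set_autosuspend_delay(dev, 100); /* mirrors AUTO_SUSPEND_DELAY */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
    }

    static int example_power_on(struct device *dev)
    {
        /* 0: device was resumed, 1: it was already active, <0: error. */
        return pm_runtime_get_sync(dev);
    }

    static void example_power_off(struct device *dev)
    {
        /* Defer the real power-down; a new power_on within the delay cancels it. */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
    }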
diff --git a/drivers/gpu/arm_gpu/platform/hisilicon/Kbuild b/drivers/gpu/arm_gpu/platform/hisilicon/Kbuild
new file mode 100644
index 000000000000..c566cfe1ea36
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/hisilicon/Kbuild
@@ -0,0 +1,15 @@
+#
+# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+mali_kbase-y += $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_config_hisilicon.o
diff --git a/drivers/gpu/arm_gpu/platform/hisilicon/Kconfig b/drivers/gpu/arm_gpu/platform/hisilicon/Kconfig
new file mode 100644
index 000000000000..151d1e3c0627
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/hisilicon/Kconfig
@@ -0,0 +1,41 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+config MALI_PM_DEMAND
+ bool "Switch power policy"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Switch the power policy between different platforms (FPGA or chip).
+
+config REPORT_VSYNC
+ bool "Enable REPORT_VSYNC"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default y
+ help
+ Enable reporting of vsync events to the Mali driver.
+
+config MALI_IDLE_AUTO_CLK_DIV
+ bool "Idle auto clock divide"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default y
+ help
+ Automatically decrease the clock rate when the GPU is idle.
+
+config MALI_GPU_DRM
+ bool "Enable GPU DRM feature"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Enable the Mali GPU DRM feature.
diff --git a/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hifeatures.h b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hifeatures.h
new file mode 100644
index 000000000000..78b39e74d9e9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hifeatures.h
@@ -0,0 +1,70 @@
+/*
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _KBASE_CONFIG_HI_FEATURES_H_
+#define _KBASE_CONFIG_HI_FEATURES_H_
+
+enum kbase_hi_feature {
+ KBASE_FEATURE_HI0001,
+ KBASE_FEATURE_HI0002,
+ KBASE_FEATURE_HI0003,
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0005,
+ KBASE_FEATURE_HI0006,
+ KBASE_FEATURE_HI0007,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+
+static const enum kbase_hi_feature kbase_hi_feature_t880_r0p2[] = {
+ KBASE_FEATURE_HI0002,
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+
+static const enum kbase_hi_feature kbase_hi_feature_t830_r2p0[] = {
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0007,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+
+static const enum kbase_hi_feature kbase_hi_feature_t880_r2p0[] = {
+ KBASE_FEATURE_HI0002,
+ KBASE_FEATURE_HI0003,
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0005,
+ KBASE_FEATURE_HI0006,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+
+static const enum kbase_hi_feature kbase_hi_feature_tMIx_r0p0[] = {
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0006,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+static const enum kbase_hi_feature kbase_hi_feature_tHEx_r0p0[] = {
+ KBASE_FEATURE_HI0004,
+ KBASE_FEATURE_HI0006,
+ KBASE_FEATURE_HI0008,
+ KBASE_HI_FEATURE_END
+};
+#endif /* _KBASE_CONFIG_HI_FEATURES_H_ */
diff --git a/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hisilicon.c b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hisilicon.c
new file mode 100644
index 000000000000..8a45cfbb754b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_hisilicon.c
@@ -0,0 +1,512 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#ifdef CONFIG_PM_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_PM_DEVFREQ */
+
+#include <trace/events/power.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_REPORT_VSYNC
+#include <linux/export.h>
+#endif
+#include <linux/delay.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include "mali_kbase_config_platform.h"
+#include "mali_kbase_config_hifeatures.h"
+
+typedef enum {
+ MALI_ERROR_NONE = 0,
+ MALI_ERROR_OUT_OF_GPU_MEMORY,
+ MALI_ERROR_OUT_OF_MEMORY,
+ MALI_ERROR_FUNCTION_FAILED,
+} mali_error;
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0xFC010000,
+ .end = 0xFC010000 + (4096 * 4) - 1
+ }
+};
+#endif /* CONFIG_OF */
+
+#define RUNTIME_PM_DELAY_1MS 1
+#define RUNTIME_PM_DELAY_30MS 30
+
+#ifdef CONFIG_REPORT_VSYNC
+static struct kbase_device *kbase_dev = NULL;
+#endif
+
+struct hisi_platform_data {
+ int vsync_hit;
+ void __iomem *pctrlreg;
+ void __iomem *pmctrlreg;
+ unsigned long features_mask[2];
+ u32 gpu_vid;
+};
+
+static int kbase_set_hi_features_mask(struct kbase_device *kbdev,
+ struct hisi_platform_data *pd)
+{
+ const enum kbase_hi_feature *hi_features;
+ u32 gpu_vid;
+ u32 product_id;
+
+ gpu_vid = pd->gpu_vid;
+ product_id = gpu_vid & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_vid) {
+ case GPU_ID2_MAKE(6, 0, 10, 0, 0, 0, 2):
+ hi_features = kbase_hi_feature_tMIx_r0p0;
+ break;
+ case GPU_ID2_MAKE(6, 2, 2, 1, 0, 0, 0):
+ hi_features = kbase_hi_feature_tHEx_r0p0;
+ break;
+ case GPU_ID2_MAKE(6, 2, 2, 1, 0, 0, 1):
+ hi_features = kbase_hi_feature_tHEx_r0p0;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "[hi-feature]Unknown GPU ID %x", gpu_vid);
+ return -EINVAL;
+ }
+ } else {
+ switch (gpu_vid) {
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
+ hi_features = kbase_hi_feature_t880_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0):
+ hi_features = kbase_hi_feature_t830_r2p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0):
+ hi_features = kbase_hi_feature_t880_r2p0;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "[hi-feature]Unknown GPU ID %x", gpu_vid);
+ return -EINVAL;
+ }
+ }
+
+ dev_info(kbdev->dev, "[hi-feature]GPU identified as 0x%04x r%dp%d status %d",
+ (gpu_vid & GPU_ID_VERSION_PRODUCT_ID) >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ (gpu_vid & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+ (gpu_vid & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+ (gpu_vid & GPU_ID_VERSION_STATUS) >> GPU_ID_VERSION_STATUS_SHIFT);
+
+ for (; *hi_features != KBASE_HI_FEATURE_END; hi_features++)
+ set_bit(*hi_features, &pd->features_mask[0]);
+
+ return 0;
+}
+
+static inline void kbase_platform_on(struct kbase_device *kbdev)
+{
+ if (kbdev->regulator) {
+ struct hisi_platform_data *pd;
+
+ pd = (struct hisi_platform_data *)kbdev->platform_context;
+
+ if (unlikely(regulator_enable(kbdev->regulator))) {
+ dev_err(kbdev->dev, "Failed to enable regulator\n");
+ BUG_ON(1);
+ }
+
+ if (pd->gpu_vid == 0) {
+ pd->gpu_vid = kbase_os_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_ID)
+ );
+ if (unlikely(kbase_set_hi_features_mask(kbdev, pd))) {
+ dev_err(kbdev->dev,
+ "Failed to set hi features\n");
+ }
+ }
+
+ if (kbase_has_hi_feature(pd, KBASE_FEATURE_HI0004)) {
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(PWR_KEY),
+ KBASE_PWR_KEY_VALUE);
+ kbase_os_reg_write(kbdev,
+ GPU_CONTROL_REG(PWR_OVERRIDE1),
+ KBASE_PWR_OVERRIDE_VALUE);
+ }
+
+ if (kbase_has_hi_feature(pd, KBASE_FEATURE_HI0003)) {
+ int value = 0;
+ value = readl(pd->pctrlreg + PERI_CTRL19) &
+ GPU_X2P_GATOR_BYPASS;
+ writel(value, pd->pctrlreg + PERI_CTRL19);
+ }
+ }
+}
+
+static inline void kbase_platform_off(struct kbase_device *kbdev)
+{
+ if (kbdev->regulator) {
+ if (unlikely(regulator_disable(kbdev->regulator))) {
+ dev_err(kbdev->dev, "MALI-MIDGARD: Failed to disable regulator\n");
+ }
+ }
+}
+
+#ifdef CONFIG_REPORT_VSYNC
+void mali_kbase_pm_report_vsync(int buffer_updated)
+{
+ unsigned long flags;
+
+ if (kbase_dev) {
+ struct hisi_platform_data *pd;
+
+ pd = (struct hisi_platform_data *)kbase_dev->platform_context;
+
+ spin_lock_irqsave(&kbase_dev->pm.backend.metrics.lock, flags);
+ pd->vsync_hit = buffer_updated;
+ spin_unlock_irqrestore(&kbase_dev->pm.backend.metrics.lock,
+ flags);
+ }
+}
+EXPORT_SYMBOL(mali_kbase_pm_report_vsync);
+#endif
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation, u32 util_gl_share, u32 util_cl_share[2])
+{
+ return 1;
+}
+
+int kbase_platform_dvfs_enable(struct kbase_device *kbdev, bool enable, int freq)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ if (enable != kbdev->pm.backend.metrics.timer_active) {
+ if (enable) {
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ hrtimer_start(&kbdev->pm.backend.metrics.timer,
+ HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+ } else {
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+ }
+ }
+
+ return 1;
+}
+#endif
+
+static int kbase_platform_init(struct kbase_device *kbdev)
+{
+ struct hisi_platform_data *pd;
+ int err;
+
+#ifdef CONFIG_REPORT_VSYNC
+ kbase_dev = kbdev;
+#endif
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ err = -ENOMEM;
+ goto no_mem;
+ }
+
+ pd->pmctrlreg = ioremap(SYS_REG_PMCTRL_BASE_ADDR, SYS_REG_PMCTRL_SIZE);
+ if (!pd->pmctrlreg) {
+ dev_err(kbdev->dev, "Can't remap sys pmctrl register window on platform hi3660\n");
+ err = -EINVAL;
+ goto out_pmctrl_ioremap;
+ }
+
+ pd->pctrlreg = ioremap(SYS_REG_PCTRL_BASE_ADDR, SYS_REG_PCTRL_SIZE);
+ if (!pd->pctrlreg) {
+ dev_err(kbdev->dev, "Can't remap sys pctrl register window on platform hi3660\n");
+ err = -EINVAL;
+ goto out_pctrl_ioremap;
+ }
+
+ kbdev->platform_context = pd;
+
+ kbase_platform_on(kbdev);
+
+ if (kbase_has_hi_feature(pd, KBASE_FEATURE_HI0006)) {
+ unsigned int value = 0;
+ /* GPU and PMCTRL shader core power on/off frequency-decrease
+ * handshake: clear bits [3:0] and [19:16] of register 0x264 to
+ * enable G3D HPM hardware status control. */
+ value = readl(pd->pmctrlreg + G3DHPMBYPASS) & MASK_G3DHPMBYPASS;
+ writel(value, pd->pmctrlreg + G3DHPMBYPASS);
+
+ /* Clear bit [0] of register 0x268 to enable the G3D automatic
+ * clock divider. */
+ value = readl(pd->pmctrlreg + G3DAUTOCLKDIVBYPASS) &
+ MASK_G3DAUTOCLKDIVBYPASS;
+ writel(value, pd->pmctrlreg + G3DAUTOCLKDIVBYPASS);
+
+ /* GPU idle VDM frequency decrease: set bit [26] of register
+ * 0x46c so the L2 clock rate is reduced while the GPU is idle. */
+ value = readl(pd->pmctrlreg + VS_CTRL_2) | (1 << 26);
+ writel(value, pd->pmctrlreg + VS_CTRL_2);
+ }
+
+ kbase_platform_off(kbdev);
+
+ return 0;
+
+out_pctrl_ioremap:
+ iounmap(pd->pmctrlreg);
+out_pmctrl_ioremap:
+ kfree(pd);
+no_mem:
+ return err;
+}
+
+static void kbase_platform_term(struct kbase_device *kbdev)
+{
+ struct hisi_platform_data *pd;
+
+ pd = (struct hisi_platform_data *)kbdev->platform_context;
+
+ iounmap(pd->pmctrlreg);
+ iounmap(pd->pctrlreg);
+
+ kfree(pd);
+}
+
+struct kbase_platform_funcs_conf platform_funcs = {
+ .platform_init_func = &kbase_platform_init,
+ .platform_term_func = &kbase_platform_term,
+};
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+ int result;
+ int ret_val;
+ struct device *dev = kbdev->dev;
+
+#if (HARD_RESET_AT_POWER_OFF != 1)
+ if (!pm_runtime_status_suspended(dev))
+ ret_val = 0;
+ else
+#endif
+ ret_val = 1;
+
+ if (unlikely(dev->power.disable_depth > 0)) {
+ kbase_platform_on(kbdev);
+ } else {
+ result = pm_runtime_resume(dev);
+ if (result == -EAGAIN)
+ kbase_platform_on(kbdev);
+ else if (result < 0)
+ pr_err("[mali] pm_runtime_resume failed (%d)\n", result);
+ }
+
+ return ret_val;
+#else
+ kbase_platform_on(kbdev);
+
+ return 1;
+#endif
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+ struct device *dev = kbdev->dev;
+ int ret = 0, retry = 0;
+ struct hisi_platform_data *pd;
+
+ pd = (struct hisi_platform_data *)kbdev->platform_context;
+
+ if (kbase_has_hi_feature(pd, KBASE_FEATURE_HI0008)) {
+ /* When the GPU is idle, automatically decrease the clock
+ * rate. */
+ unsigned int tiler_lo = kbdev->tiler_available_bitmap & 0xFFFFFFFF;
+ unsigned int tiler_hi = (kbdev->tiler_available_bitmap >> 32) & 0xFFFFFFFF;
+ unsigned int l2_lo = kbdev->l2_available_bitmap & 0xFFFFFFFF;
+ unsigned int l2_hi = (kbdev->l2_available_bitmap >> 32) & 0xFFFFFFFF;
+
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(TILER_PWROFF_LO), tiler_lo);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(TILER_PWROFF_HI), tiler_hi);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(L2_PWROFF_LO), l2_lo);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(L2_PWROFF_HI), l2_hi);
+ }
+
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+
+ if (unlikely(dev->power.disable_depth > 0)) {
+ kbase_platform_off(kbdev);
+ } else {
+ do {
+ if (kbase_has_hi_feature(pd, KBASE_FEATURE_HI0007))
+ ret = pm_schedule_suspend(dev, RUNTIME_PM_DELAY_1MS);
+ else
+ ret = pm_schedule_suspend(dev, RUNTIME_PM_DELAY_30MS);
+ if (ret != -EAGAIN) {
+ if (unlikely(ret < 0)) {
+ pr_err("[mali] pm_schedule_suspend failed (%d)\n", ret);
+ WARN_ON(1);
+ }
+
+ /* correct status */
+ break;
+ }
+
+ /* -EAGAIN: keep retrying, for up to 1 s in total */
+ msleep(50);
+ } while (++retry < 20);
+ }
+#else
+ kbase_platform_off(kbdev);
+#endif
+}
+
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+static int pm_callback_runtime_init(struct kbase_device *kbdev)
+{
+ pm_suspend_ignore_children(kbdev->dev, true);
+ pm_runtime_enable(kbdev->dev);
+ return 0;
+}
+
+static void pm_callback_runtime_term(struct kbase_device *kbdev)
+{
+ pm_runtime_disable(kbdev->dev);
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+ kbase_platform_dvfs_enable(kbdev, false, 0);
+#endif
+
+ kbase_platform_off(kbdev);
+}
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+ kbase_platform_on(kbdev);
+
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+ if (!kbase_platform_dvfs_enable(kbdev, true, 0))
+ return -EPERM;
+#endif
+
+ return 0;
+}
+#endif
+
+static inline void pm_callback_suspend(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+ if (!pm_runtime_status_suspended(kbdev->dev))
+ pm_callback_runtime_off(kbdev);
+#else
+ pm_callback_power_off(kbdev);
+#endif
+}
+
+static inline void pm_callback_resume(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+ if (!pm_runtime_status_suspended(kbdev->dev))
+ pm_callback_runtime_on(kbdev);
+ else
+ pm_callback_power_on(kbdev);
+#else
+ pm_callback_power_on(kbdev);
+#endif
+}
+
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+static inline int pm_callback_runtime_idle(struct kbase_device *kbdev)
+{
+ return 1;
+}
+#endif
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = pm_callback_suspend,
+ .power_resume_callback = pm_callback_resume,
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+ .power_runtime_init_callback = pm_callback_runtime_init,
+ .power_runtime_term_callback = pm_callback_runtime_term,
+ .power_runtime_off_callback = pm_callback_runtime_off,
+ .power_runtime_on_callback = pm_callback_runtime_on,
+ .power_runtime_idle_callback = pm_callback_runtime_idle
+#else
+ .power_runtime_init_callback = NULL,
+ .power_runtime_term_callback = NULL,
+ .power_runtime_off_callback = NULL,
+ .power_runtime_on_callback = NULL,
+ .power_runtime_idle_callback = NULL
+#endif
+};
+
+
+
+static struct kbase_platform_config hi_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &hi_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
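Note (not part of the patch): the feature-mask plumbing in this file reduces to a small pattern. kbase_set_hi_features_mask() folds one of the per-revision tables from mali_kbase_config_hifeatures.h into an unsigned long bitmap with set_bit(), and the kbase_has_hi_feature() macro queries it with test_bit(). A stripped-down sketch of just that mechanism; the standalone mask and helper names are illustrative, the real mask lives in hisi_platform_data:

    #include <linux/bitops.h>
    #include <linux/types.h>
    #include "mali_kbase_config_hifeatures.h"

    /* Illustrative standalone copy of the per-device feature mask. */
    static unsigned long example_features_mask[2];

    static void example_apply_table(const enum kbase_hi_feature *table)
    {
        /* Walk the revision table until the KBASE_HI_FEATURE_END sentinel. */
        for (; *table != KBASE_HI_FEATURE_END; table++)
            set_bit(*table, &example_features_mask[0]);
    }

    static bool example_has(enum kbase_hi_feature feature)
    {
        /* Same test as the kbase_has_hi_feature() macro. */
        return test_bit(feature, &example_features_mask[0]);
    }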
diff --git a/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_platform.h b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_platform.h
new file mode 100644
index 000000000000..b9275ad81e47
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/hisilicon/mali_kbase_config_platform.h
@@ -0,0 +1,104 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef _MALI_KBASE_CONFIG_PLATFORM_H_
+#define _MALI_KBASE_CONFIG_PLATFORM_H_
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 5000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 5000
+
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+#define KBASE_PLATFORM_CALLBACKS ((uintptr_t)&platform_funcs)
+
+#ifdef CONFIG_PM_DEVFREQ
+#define POWER_MODEL_CALLBACKS ((uintptr_t)&hisi_model_ops)
+#endif
+
+#define GPU_SPEED_FUNC (NULL)
+
+#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed)
+
+#define PLATFORM_FUNCS (KBASE_PLATFORM_CALLBACKS)
+
+/**
+ * @brief Tell whether a feature should be enabled
+ */
+#define kbase_has_hi_feature(pd, hi_feature)\
+ test_bit(hi_feature, &(pd)->features_mask[0])
+
+/*
+ * Begin Register Offsets
+ */
+#define SYS_REG_CRG_BASE_ADDR 0xFFF35000 /* Crg control register base address */
+#define SYS_REG_CRG_SIZE 0x1000 /* Crg control register size */
+
+#define KBASE_PWR_KEY_VALUE 0x2968a819
+#define KBASE_PWR_OVERRIDE_VALUE 0xc4b00960
+
+#define SYS_REG_PMCTRL_BASE_ADDR 0xFFF31000
+#define SYS_REG_PMCTRL_SIZE 0x1000
+#define G3DHPMBYPASS 0x264
+#define G3DAUTOCLKDIVBYPASS 0x268
+#define VS_CTRL_2 0x46c
+#define MASK_G3DHPMBYPASS 0xfff0fff0
+#define MASK_G3DAUTOCLKDIVBYPASS 0xfffffffe
+
+#define SYS_REG_PCTRL_BASE_ADDR 0xE8A09000
+#define SYS_REG_PCTRL_SIZE 0x1000
+#define PERI_CTRL19 0x050
+#define GPU_X2P_GATOR_BYPASS 0xfeffffff
+
+#define SYS_REG_CRG_CLOCK_EN 0x38
+#define SYS_REG_CRG_CLCOK_STATUS 0x3c
+#define SYS_REG_CRG_G3D 0x84
+#define SYS_REG_CRG_G3D_EN 0x88
+#define SYS_REG_CRG_RESET_STATUS 0x8c
+#define SYS_REG_CRG_ISO_STATUS 0x14c
+
+#define KBASE_PWR_RESET_VALUE 0x007c001c
+#define KBASE_PWR_ACTIVE_BIT 0x2
+#define KBASE_PWR_INACTIVE_MAX_LOOPS 100000
+
+#define SYS_REG_CRG_W_CLOCK_EN 0x30
+#define SYS_REG_CRG_W_CLOCK_CLOSE 0x34
+#define SYS_REG_CRG_CLK_DIV_MASK_EN 0xf0
+
+#define GPU_CRG_CLOCK_VALUE 0x00000038
+#define GPU_CRG_CLOCK_POWER_OFF_MASK 0x00010000
+#define GPU_CRG_CLOCK_POWER_ON_MASK 0x00010001
+
+#define PERI_STAT_FPGA_GPU_EXIST 0xBC
+#define PERI_STAT_FPGA_GPU_EXIST_MASK 0x400000
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+extern struct kbase_platform_funcs_conf platform_funcs;
+
+#endif /* _MALI_KBASE_CONFIG_PLATFORM_H_ */
diff --git a/drivers/gpu/arm_gpu/platform/mali_kbase_platform_common.h b/drivers/gpu/arm_gpu/platform/mali_kbase_platform_common.h
new file mode 100644
index 000000000000..7cb3be7f78ce
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/mali_kbase_platform_common.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @brief Entry point to transfer control to a platform for early initialization
+ *
+ * This function is called early on in the initialization during execution of
+ * @ref kbase_driver_init.
+ *
+ * @return Zero to indicate success, non-zero for failure.
+ */
+int kbase_platform_early_init(void);
diff --git a/drivers/gpu/arm_gpu/platform/mali_kbase_platform_fake.h b/drivers/gpu/arm_gpu/platform/mali_kbase_platform_fake.h
new file mode 100644
index 000000000000..01f9dfce93cc
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/mali_kbase_platform_fake.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+
+/**
+ * kbase_platform_fake_register - Entry point for fake platform registration
+ *
+ * This function is called early on in the initialization during execution of
+ * kbase_driver_init.
+ *
+ * Return: 0 to indicate success, non-zero for failure.
+ */
+int kbase_platform_fake_register(void);
+
+/**
+ * kbase_platform_fake_unregister - Entry point for fake platform unregistration
+ *
+ * This function is called in the termination during execution of
+ * kbase_driver_exit.
+ */
+void kbase_platform_fake_unregister(void);
+
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
diff --git a/drivers/gpu/arm_gpu/platform/vexpress/Kbuild b/drivers/gpu/arm_gpu/platform/vexpress/Kbuild
new file mode 100644
index 000000000000..1caa293666d3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+mali_kbase-y += \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_config_vexpress.o \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_cpu_vexpress.o
diff --git a/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_platform.h
new file mode 100644
index 000000000000..02835f129aa3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_platform.h
@@ -0,0 +1,75 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase_cpu_vexpress.h"
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX kbase_get_platform_max_freq()
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN kbase_get_platform_min_freq()
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_vexpress.c b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_vexpress.c
new file mode 100644
index 000000000000..15ce2bc5eea5
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_config_vexpress.c
@@ -0,0 +1,85 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_cpu_vexpress.h"
+#include "mali_kbase_config_platform.h"
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0xFC010000,
+ .end = 0xFC010000 + (4096 * 4) - 1
+ }
+};
+#endif /* CONFIG_OF */
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.c
new file mode 100644
index 000000000000..4665f98cbbe4
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.c
@@ -0,0 +1,279 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/io.h>
+#include <mali_kbase.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HZ_IN_MHZ (1000000)
+
+#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000)
+#define MOTHERBOARD_SYS_CFG_START (0x10000000)
+#define SYS_CFGDATA_OFFSET (0x000000A0)
+#define SYS_CFGCTRL_OFFSET (0x000000A4)
+#define SYS_CFGSTAT_OFFSET (0x000000A8)
+
+#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31)
+#define READ_REG_BIT_VALUE (0 << 30)
+#define DCC_DEFAULT_BIT_VALUE (0 << 26)
+#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20)
+#define SITE_DEFAULT_BIT_VALUE (1 << 16)
+#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12)
+#define DEVICE_DEFAULT_BIT_VALUE (2 << 0)
+#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0)
+#define SYS_CFG_ERROR_BIT_VALUE (1 << 1)
+
+#define FEED_REG_BIT_MASK (0x0F)
+#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03)
+#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07)
+#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B)
+#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F)
+#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13)
+
+/* The following three values are used for reading the
+ * HBI value of the LogicTile daughterboard. */
+#define VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 (0x10000000)
+#define VE_SYS_PROC_ID1_OFFSET (0x00000088)
+#define VE_LOGIC_TILE_HBI_MASK (0x00000FFF)
+
+#define IS_SINGLE_BIT_SET(val, pos) ((val) & (1 << (pos)))
+
+/**
+ * Values used for determining the GPU frequency based on the LogicTile type
+ * Used by the function kbase_get_platform_logic_tile_type
+ */
+#define VE_VIRTEX6_GPU_FREQ_MIN 5000
+#define VE_VIRTEX6_GPU_FREQ_MAX 5000
+#define VE_VIRTEX7_GPU_FREQ_MIN 40000
+#define VE_VIRTEX7_GPU_FREQ_MAX 40000
+#define VE_DEFAULT_GPU_FREQ_MIN 5000
+#define VE_DEFAULT_GPU_FREQ_MAX 5000
+
+
+#define CPU_CLOCK_SPEED_UNDEFINED (0)
+
+static u32 cpu_clock_speed = CPU_CLOCK_SPEED_UNDEFINED;
+
+static DEFINE_RAW_SPINLOCK(syscfg_lock);
+/**
+ * kbase_get_vexpress_cpu_clock_speed - Retrieves the CPU clock speed
+ * @cpu_clock: the value of the CPU clock speed in MHz
+ *
+ * Returns 0 on success, error code otherwise.
+ *
+ * The implementation is platform specific.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
+{
+ int err = 0;
+ u32 reg_val = 0;
+ u32 osc2_value = 0;
+ u32 pa_divide = 0;
+ u32 pb_divide = 0;
+ u32 pc_divide = 0;
+ void __iomem *syscfg_reg = NULL;
+ void __iomem *scc_reg = NULL;
+
+ if (CPU_CLOCK_SPEED_UNDEFINED != cpu_clock_speed) {
+ *cpu_clock = cpu_clock_speed;
+ return 0;
+ }
+
+ /* Init the value in case something goes wrong */
+ *cpu_clock = 0;
+
+ /* Map CPU register into virtual memory */
+ syscfg_reg = ioremap(MOTHERBOARD_SYS_CFG_START, 0x1000);
+ if (syscfg_reg == NULL) {
+ err = -EIO;
+ goto syscfg_reg_map_failed;
+ }
+
+ scc_reg = ioremap(CORETILE_EXPRESS_A9X4_SCC_START, 0x1000);
+ if (scc_reg == NULL) {
+ err = -EIO;
+ goto scc_reg_map_failed;
+ }
+
+ raw_spin_lock(&syscfg_lock);
+
+ /* Read SYS regs - OSC2 */
+ reg_val = readl(syscfg_reg + SYS_CFGCTRL_OFFSET);
+
+ /* Check if there is any other ongoing request */
+ if (reg_val & SYS_CFGCTRL_START_BIT_VALUE) {
+ err = -EBUSY;
+ goto ongoing_request;
+ }
+ /* Reset the CFGSTAT reg */
+ writel(0, (syscfg_reg + SYS_CFGSTAT_OFFSET));
+
+ writel(SYS_CFGCTRL_START_BIT_VALUE | READ_REG_BIT_VALUE |
+ DCC_DEFAULT_BIT_VALUE |
+ SYS_CFG_OSC_FUNC_BIT_VALUE |
+ SITE_DEFAULT_BIT_VALUE |
+ BOARD_STACK_POS_DEFAULT_BIT_VALUE |
+ DEVICE_DEFAULT_BIT_VALUE,
+ (syscfg_reg + SYS_CFGCTRL_OFFSET));
+ /* Wait for the transaction to complete */
+ while (!(readl(syscfg_reg + SYS_CFGSTAT_OFFSET) &
+ SYS_CFG_COMPLETE_BIT_VALUE))
+ ;
+ /* Read SYS_CFGSTAT Register to get the status of submitted
+ * transaction */
+ reg_val = readl(syscfg_reg + SYS_CFGSTAT_OFFSET);
+
+ if (reg_val & SYS_CFG_ERROR_BIT_VALUE) {
+ /* Error while setting register */
+ err = -EIO;
+ goto set_reg_error;
+ }
+
+ osc2_value = readl(syscfg_reg + SYS_CFGDATA_OFFSET);
+ /* Read the SCC CFGRW0 register */
+ reg_val = readl(scc_reg);
+
+ /*
+ * Select the appropriate feed:
+ * CFGRW0[0] - CLKOB
+ * CFGRW0[1] - CLKOC
+ * CFGRW0[2] - FACLK (CLK B FROM AXICLK PLL)
+ */
+ /* Calculate the FCLK */
+ if (IS_SINGLE_BIT_SET(reg_val, 0)) {
+ /* CFGRW0[0] - CLKOB */
+ /* CFGRW0[6:3] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PA_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[10:7] */
+ pb_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PB_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PB_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1);
+ } else if (IS_SINGLE_BIT_SET(reg_val, 1)) {
+ /* CFGRW0[1] - CLKOC */
+ /* CFGRW0[6:3] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PA_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[14:11] */
+ pc_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PC_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PC_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pc_divide + 1);
+ } else if (IS_SINGLE_BIT_SET(reg_val, 2)) {
+ /* CFGRW0[2] - FACLK */
+ /* CFGRW0[18:15] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ AXICLK_PA_DIVIDE_BIT_SHIFT)) >>
+ AXICLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[22:19] */
+ pb_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ AXICLK_PB_DIVIDE_BIT_SHIFT)) >>
+ AXICLK_PB_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1);
+ } else {
+ err = -EIO;
+ }
+
+set_reg_error:
+ongoing_request:
+ raw_spin_unlock(&syscfg_lock);
+ *cpu_clock /= HZ_IN_MHZ;
+
+ if (!err)
+ cpu_clock_speed = *cpu_clock;
+
+ iounmap(scc_reg);
+
+scc_reg_map_failed:
+ iounmap(syscfg_reg);
+
+syscfg_reg_map_failed:
+
+ return err;
+}
+
+/**
+ * kbase_get_platform_logic_tile_type - determines which LogicTile type
+ * is used by Versatile Express
+ *
+ * When platform_config build parameter is specified as vexpress, i.e.,
+ * platform_config=vexpress, GPU frequency may vary dependent on the
+ * particular platform. The GPU frequency depends on the LogicTile type.
+ *
+ * This function determines which LogicTile type is used by the platform by
+ * reading the HBI value of the daughterboard which holds the LogicTile:
+ *
+ * 0x217 HBI0217 Virtex-6
+ * 0x192 HBI0192 Virtex-5
+ * 0x247 HBI0247 Virtex-7
+ *
+ * Return: HBI value of the logic tile daughterboard, zero if not accessible
+ */
+static u32 kbase_get_platform_logic_tile_type(void)
+{
+ void __iomem *syscfg_reg = NULL;
+ u32 sys_procid1 = 0;
+
+ syscfg_reg = ioremap(VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 + VE_SYS_PROC_ID1_OFFSET, 4);
+ if (NULL != syscfg_reg) {
+ sys_procid1 = readl(syscfg_reg);
+ iounmap(syscfg_reg);
+ }
+
+ return sys_procid1 & VE_LOGIC_TILE_HBI_MASK;
+}
+
+u32 kbase_get_platform_min_freq(void)
+{
+ u32 ve_logic_tile = kbase_get_platform_logic_tile_type();
+
+ switch (ve_logic_tile) {
+ case 0x217:
+ /* Virtex 6, HBI0217 */
+ return VE_VIRTEX6_GPU_FREQ_MIN;
+ case 0x247:
+ /* Virtex 7, HBI0247 */
+ return VE_VIRTEX7_GPU_FREQ_MIN;
+ default:
+ /* all other logic tiles, i.e., Virtex 5 HBI0192
+ * or unsuccessful reading from the platform -
+ * fall back to some default value */
+ return VE_DEFAULT_GPU_FREQ_MIN;
+ }
+}
+
+u32 kbase_get_platform_max_freq(void)
+{
+ u32 ve_logic_tile = kbase_get_platform_logic_tile_type();
+
+ switch (ve_logic_tile) {
+ case 0x217:
+ /* Virtex 6, HBI0217 */
+ return VE_VIRTEX6_GPU_FREQ_MAX;
+ case 0x247:
+ /* Virtex 7, HBI0247 */
+ return VE_VIRTEX7_GPU_FREQ_MAX;
+ default:
+ /* all other logic tiles, i.e., Virtex 5 HBI0192
+ * or unsuccessful reading from the platform -
+ * fall back to some default value */
+ return VE_DEFAULT_GPU_FREQ_MAX;
+ }
+}
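Note (not part of the patch): as a worked example of the divider arithmetic in kbase_get_vexpress_cpu_clock_speed() above, with made-up register values: if OSC2 reports 50,000,000 Hz, CFGRW0[0] is set (CLKOB feed), CFGRW0[6:3] = 1 and CFGRW0[10:7] = 9, the result is 50 MHz * (1 + 1) / (9 + 1) = 10 MHz after the HZ_IN_MHZ division. The bit-field extraction reduces to:

    #include <stdio.h>

    int main(void)
    {
        /* Made-up CFGRW0 value: CLKOB selected, PA divider = 1, PB divider = 9. */
        unsigned int reg_val = (1u << 0) | (1u << 3) | (9u << 7);
        unsigned int osc2    = 50000000u;          /* Hz, as read from SYS_CFGDATA */

        unsigned int pa  = (reg_val >> 3) & 0x0F;  /* CFGRW0[6:3]  */
        unsigned int pb  = (reg_val >> 7) & 0x0F;  /* CFGRW0[10:7] */
        unsigned int mhz = osc2 * (pa + 1) / (pb + 1) / 1000000u;

        printf("CPU clock: %u MHz\n", mhz);        /* prints 10 */
        return 0;
    }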
diff --git a/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.h b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.h
new file mode 100644
index 000000000000..da865698133a
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress/mali_kbase_cpu_vexpress.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_CPU_VEXPRESS_H_
+#define _KBASE_CPU_VEXPRESS_H_
+
+/**
+ * Versatile Express implementation of @ref kbase_cpu_clk_speed_func.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock);
+
+/**
+ * Get the minimum GPU frequency for the attached logic tile
+ */
+u32 kbase_get_platform_min_freq(void);
+
+/**
+ * Get the maximum GPU frequency for the attached logic tile
+ */
+u32 kbase_get_platform_max_freq(void);
+
+#endif /* _KBASE_CPU_VEXPRESS_H_ */
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/Kbuild
new file mode 100644
index 000000000000..7efe8fa4263b
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/Kbuild
@@ -0,0 +1,16 @@
+#
+# (C) COPYRIGHT 2013-2014, 2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+mali_kbase-y += $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_config_vexpress.o
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
new file mode 100644
index 000000000000..0efbf3962f98
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 5000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 5000
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
new file mode 100644
index 000000000000..3ff0930fb4a3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0x2f010000,
+ .end = 0x2f010000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/Kbuild b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/Kbuild
new file mode 100644
index 000000000000..1caa293666d3
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+mali_kbase-y += \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_config_vexpress.o \
+ $(MALI_PLATFORM_THIRDPARTY_DIR)/mali_kbase_cpu_vexpress.o
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
new file mode 100644
index 000000000000..dbdf21e009f9
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
@@ -0,0 +1,75 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase_cpu_vexpress.h"
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 10000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 10000
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
new file mode 100644
index 000000000000..76ffe4a1e59e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
@@ -0,0 +1,83 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 75,
+ .mmu_irq_number = 76,
+ .gpu_irq_number = 77,
+ .io_memory_region = {
+ .start = 0x2F000000,
+ .end = 0x2F000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
+
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
new file mode 100644
index 000000000000..816dff49835f
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
@@ -0,0 +1,71 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/io.h>
+#include <mali_kbase.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HZ_IN_MHZ (1000000)
+
+#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000)
+#define MOTHERBOARD_SYS_CFG_START (0x10000000)
+#define SYS_CFGDATA_OFFSET (0x000000A0)
+#define SYS_CFGCTRL_OFFSET (0x000000A4)
+#define SYS_CFGSTAT_OFFSET (0x000000A8)
+
+#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31)
+#define READ_REG_BIT_VALUE (0 << 30)
+#define DCC_DEFAULT_BIT_VALUE (0 << 26)
+#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20)
+#define SITE_DEFAULT_BIT_VALUE (1 << 16)
+#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12)
+#define DEVICE_DEFAULT_BIT_VALUE (2 << 0)
+#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0)
+#define SYS_CFG_ERROR_BIT_VALUE (1 << 1)
+
+#define FEED_REG_BIT_MASK (0x0F)
+#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03)
+#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07)
+#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B)
+#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F)
+#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13)
+
+#define IS_SINGLE_BIT_SET(val, pos) (val&(1<<pos))
+
+#define CPU_CLOCK_SPEED_UNDEFINED 0
+
+#define CPU_CLOCK_SPEED_6XV7 50
+
+static u32 cpu_clock_speed = CPU_CLOCK_SPEED_UNDEFINED;
+
+static DEFINE_RAW_SPINLOCK(syscfg_lock);
+/**
+ * kbase_get_vexpress_cpu_clock_speed - Retrieve the CPU clock speed
+ *
+ * The implementation is platform specific.
+ *
+ * @cpu_clock: [out] CPU clock speed in MHz
+ *
+ * Return: 0 on success, 1 otherwise
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
+{
+ /* TODO: MIDBASE-2873 - Provide runtime detection of CPU clock freq for 6XV7 board */
+ *cpu_clock = CPU_CLOCK_SPEED_6XV7;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h
new file mode 100644
index 000000000000..23647ccb0871
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h
@@ -0,0 +1,28 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_CPU_VEXPRESS_H_
+#define _KBASE_CPU_VEXPRESS_H_
+
+/**
+ * Versatile Express implementation of @ref kbase_cpu_clk_speed_func.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock);
+
+#endif /* _KBASE_CPU_VEXPRESS_H_ */
diff --git a/drivers/gpu/arm_gpu/platform_dummy/mali_ukk_os.h b/drivers/gpu/arm_gpu/platform_dummy/mali_ukk_os.h
new file mode 100644
index 000000000000..5fa9b39c4bc0
--- /dev/null
+++ b/drivers/gpu/arm_gpu/platform_dummy/mali_ukk_os.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_ukk_os.h
+ * Types and definitions common to Linux for the kernel side of the
+ * User-Kernel interface.
+ */
+
+#ifndef _UKK_OS_H_ /* Linux version */
+#define _UKK_OS_H_
+
+#include <linux/fs.h>
+
+/**
+ * @addtogroup uk_api User-Kernel Interface API
+ * @{
+ */
+
+/**
+ * @addtogroup uk_api_kernel UKK (Kernel side)
+ * @{
+ */
+
+/**
+ * Internal OS specific data structure associated with each UKK session. Part
+ * of a ukk_session object.
+ */
+typedef struct ukkp_session {
+ int dummy; /**< No internal OS specific data at this time */
+} ukkp_session;
+
+/** @} end group uk_api_kernel */
+
+/** @} end group uk_api */
+
+#endif /* _UKK_OS_H_ */
diff --git a/drivers/gpu/arm_gpu/protected_mode_switcher.h b/drivers/gpu/arm_gpu/protected_mode_switcher.h
new file mode 100644
index 000000000000..5dc2f3ba8cf6
--- /dev/null
+++ b/drivers/gpu/arm_gpu/protected_mode_switcher.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _PROTECTED_MODE_SWITCH_H_
+#define _PROTECTED_MODE_SWITCH_H_
+
+struct protected_mode_device;
+
+/**
+ * struct protected_mode_ops - Callbacks for protected mode switch operations
+ *
+ * @protected_mode_enable: Callback to enable protected mode for device
+ * @protected_mode_disable: Callback to disable protected mode for device
+ */
+struct protected_mode_ops {
+ /**
+ * protected_mode_enable() - Enable protected mode on device
+ * @dev: The struct device
+ *
+ * Return: 0 on success, non-zero on error
+ */
+ int (*protected_mode_enable)(
+ struct protected_mode_device *protected_dev);
+
+ /**
+ * protected_mode_disable() - Disable protected mode on device, and
+ * reset device
+ * @dev: The struct device
+ *
+ * Return: 0 on success, non-zero on error
+ */
+ int (*protected_mode_disable)(
+ struct protected_mode_device *protected_dev);
+};
+
+/**
+ * struct protected_mode_device - Device structure for protected mode devices
+ *
+ * @ops - Callbacks associated with this device
+ * @data - Pointer to device private data
+ *
+ * This structure should be registered with the platform device using
+ * platform_set_drvdata().
+ */
+struct protected_mode_device {
+ struct protected_mode_ops ops;
+ void *data;
+};
+
+#endif /* _PROTECTED_MODE_SWITCH_H_ */
diff --git a/drivers/gpu/arm_gpu/sconscript b/drivers/gpu/arm_gpu/sconscript
new file mode 100644
index 000000000000..ff23d7aebe6e
--- /dev/null
+++ b/drivers/gpu/arm_gpu/sconscript
@@ -0,0 +1,92 @@
+#
+# (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+import sys
+Import('env')
+
+SConscript( 'tests/sconscript' )
+
+mock_test = 0
+
+# The fake platform is a transient solution for GPL drivers running on kernels that do not provide configuration via platform data.
+# For such kernels set fake_platform_device to 1; for kernels that do provide platform data set it to 0.
+if env['platform_config']=='devicetree' or env['platform_config']=='juno_soc':
+ fake_platform_device = 0
+else:
+ fake_platform_device = 1
+
+# Source files required for kbase.
+kbase_src = [
+ Glob('*.c'),
+ Glob('backend/*/*.c'),
+ Glob('internal/*/*.c'),
+ Glob('ipa/*.c')
+]
+
+if env['platform_config']=='juno_soc':
+ kbase_src += [Glob('platform/devicetree/*.c')]
+else:
+ kbase_src += [Glob('platform/%s/*.c' % env['platform_config'])]
+
+if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1':
+ kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')]
+ mock_test = 1
+
+# we need platform config for GPL version using fake platform
+if fake_platform_device==1:
+ # Check if we are compiling for PBX
+ if env.KernelConfigEnabled("CONFIG_MACH_REALVIEW_PBX") and \
+ env["platform_config"] in {"vexpress", "vexpress_6xvirtex7_10mhz"}:
+ sys.stderr.write("WARNING: Building for a PBX kernel but with platform_config=vexpress*\n")
+	# if the platform config file is in the tpip directory then use that, otherwise use the default config directory
+ if Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config'])):
+ kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config']))
+ else:
+ kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/*%s.c' % (env['platform_config']))
+
+make_args = env.kernel_get_config_defines(ret_list = True,
+ fake = fake_platform_device) + [
+ 'PLATFORM=%s' % env['platform'],
+ 'MALI_ERROR_INJECT_ON=%s' % env['error_inject'],
+ 'MALI_KERNEL_TEST_API=%s' % env['debug'],
+ 'MALI_UNIT_TEST=%s' % env['unit'],
+ 'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
+ 'MALI_MOCK_TEST=%s' % mock_test,
+ 'MALI_CUSTOMER_RELEASE=%s' % env['release'],
+ 'MALI_INSTRUMENTATION_LEVEL=%s' % env['instr'],
+ 'MALI_COVERAGE=%s' % env['coverage'],
+ 'MALI_BUS_LOG=%s' % env['buslog']
+]
+
+kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
+ make_args = make_args)
+
+# Add a dependency on kds.ko.
+# Only necessary when KDS is not built into the kernel.
+#
+if env['os'] != 'android':
+ if not env.KernelConfigEnabled("CONFIG_KDS"):
+ env.Depends(kbase, '$STATIC_LIB_PATH/kds.ko')
+
+# need Module.symvers from ump.ko build
+if int(env['ump']) == 1:
+ env.Depends(kbase, '$STATIC_LIB_PATH/ump.ko')
+
+if 'smc_protected_mode_switcher' in env:
+ env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/smc_protected_mode_switcher.ko')
+
+env.KernelObjTarget('kbase', kbase)
+
+env.AppendUnique(BASE=['cutils_linked_list'])
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 43cb33dc8333..9ebbf3691c7f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -107,6 +107,14 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_CMA_FBDEV_BUFFER_NUM
+ int "Cma Fbdev Buffer Number"
+ depends on DRM_KMS_CMA_HELPER
+ default 1
+ help
+	  Defines the number of CMA fbdev buffers. The default is a single
+	  buffer; set this to 2 for double buffering or 3 for triple buffering.
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index d2b0499ab7d7..2fed567f9943 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,6 +6,14 @@ config DRM_I2C_ADV7511
help
Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+config DRM_I2C_ADV7511_AUDIO
+ bool "ADV7511 HDMI Audio driver"
+ depends on DRM_I2C_ADV7511 && SND_SOC
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the ADV7511 HDMI Audio interface. This is used in
+	  conjunction with the ADV7511 HDMI driver.
+
config DRM_I2C_ADV7533
bool "ADV7533 encoder"
depends on DRM_I2C_ADV7511
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 9019327fff4c..5ba675534f6e 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,3 +1,4 @@
adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 3e74e1a6584c..0396791a0cd0 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -309,6 +309,8 @@ struct adv7511 {
struct drm_display_mode curr_mode;
unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
@@ -336,6 +338,7 @@ struct adv7511 {
bool use_timing_gen;
enum adv7511_type type;
+ struct platform_device *audio_pdev;
};
#ifdef CONFIG_DRM_I2C_ADV7533
@@ -391,4 +394,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
}
#endif
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
+void adv7511_audio_exit(struct adv7511 *adv7511);
+#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
+static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ return 0;
+}
+static inline void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+}
+#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+
#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
new file mode 100644
index 000000000000..5ce29a5a8b09
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -0,0 +1,213 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Copyright (c) 2016, Linaro Limited
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <sound/core.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "adv7511.h"
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
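+	/*
+	 * HDMI audio clock regeneration: the sink reconstructs the audio clock
+	 * from CTS and N via 128 * fs = f_TMDS * N / CTS.  The N values below
+	 * are the HDMI-recommended defaults for the base sample rates, and CTS
+	 * is then derived as f_TMDS * N / (128 * fs).  f_tmds appears to be
+	 * given in kHz here, hence the final multiplication by 1000.  Rates
+	 * other than 32/44.1/48 kHz leave *n at whatever the caller set.
+	 */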
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+ unsigned int rate;
+ unsigned int len;
+
+ switch (hparms->sample_rate) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (hparms->sample_width) {
+ case 16:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case 18:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case 20:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case 24:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ invert_clock = fmt->bit_clk_inv;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ adv7511->f_audio = hparms->sample_rate;
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int audio_startup(struct device *dev, void *data)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+
+ /* hide Audio infoframe updates */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+ /* enable N/CTS, enable Audio sample packets */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(5), BIT(5));
+ /* enable N/CTS */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(6), BIT(6));
+ /* not copyrighted */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
+ BIT(5), BIT(5));
+ /* enable audio infoframes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(3), BIT(3));
+ /* AV mute disable */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ BIT(5), 0);
+ return 0;
+}
+
+static void audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static const struct hdmi_codec_ops adv7511_codec_ops = {
+ .hw_params = adv7511_hdmi_hw_params,
+ .audio_shutdown = audio_shutdown,
+ .audio_startup = audio_startup,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &adv7511_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+};
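+
+/*
+ * codec_data is handed to the generic ASoC hdmi-codec driver, which is
+ * registered below as a child platform device and calls back into
+ * adv7511_codec_ops for hw_params/audio_startup/audio_shutdown.
+ */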
+
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
+}
+
+void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+ if (adv7511->audio_pdev) {
+ platform_device_unregister(adv7511->audio_pdev);
+ adv7511->audio_pdev = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a68f94daf9b6..19e1c261ca61 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -338,7 +338,7 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
* Still, let's be safe and stick to the documentation.
*/
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
+ ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
ADV7511_INT1_DDC_ERROR);
}
@@ -354,6 +354,7 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7511_REG_POWER2_HPD_SRC_MASK,
ADV7511_REG_POWER2_HPD_SRC_NONE);
+ msleep(200);
}
static void adv7511_power_on(struct adv7511 *adv7511)
@@ -647,8 +648,40 @@ static int adv7511_mode_valid(struct adv7511 *adv7511,
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
-
- return MODE_OK;
+	/*
+	 * Whitelist of modes known to work well; they are marked preferred so
+	 * they sort to the front of the mode list, and all other modes are
+	 * rejected.
+	 */
+ DRM_DEBUG("Checking mode %ix%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode), mode->clock);
+ if ((mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 148500) ||
+ (mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 148352) ||
+ (mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 80192) ||
+ (mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 74250) ||
+ (mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 61855) ||
+ (mode->hdisplay == 1680 && mode->vdisplay == 1050 && mode->clock == 147116) ||
+ (mode->hdisplay == 1680 && mode->vdisplay == 1050 && mode->clock == 146250) ||
+ (mode->hdisplay == 1680 && mode->vdisplay == 1050 && mode->clock == 144589) ||
+ (mode->hdisplay == 1600 && mode->vdisplay == 1200 && mode->clock == 160961) ||
+ (mode->hdisplay == 1600 && mode->vdisplay == 900 && mode->clock == 118963) ||
+ (mode->hdisplay == 1440 && mode->vdisplay == 900 && mode->clock == 126991) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 1024 && mode->clock == 128946) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 1024 && mode->clock == 98619) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 960 && mode->clock == 102081) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 800 && mode->clock == 83496) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 720 && mode->clock == 74440) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 720 && mode->clock == 74250) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 78800) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 75000) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 81833) ||
+ (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 48907) ||
+ (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
+ (mode->hdisplay == 800 && mode->vdisplay == 480 && mode->clock == 32000)) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG("OK\n");
+ return MODE_OK;
+ }
+ DRM_DEBUG("BAD\n");
+ return MODE_BAD;
}
static void adv7511_mode_set(struct adv7511 *adv7511,
@@ -850,6 +883,10 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
if (adv->type == ADV7533)
ret = adv7533_attach_dsi(adv);
+ if (adv->i2c_main->irq)
+ regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_HPD);
+
return ret;
}
@@ -1064,6 +1101,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
+ adv7511_audio_init(dev, adv7511);
+
return 0;
err_unregister_cec:
@@ -1085,6 +1124,8 @@ static int adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
+ adv7511_audio_exit(adv7511);
+
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index d7f7b7ce8ebe..8b210373cfa2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x17, 0xd0 },
{ 0x24, 0x20 },
{ 0x57, 0x11 },
+ { 0x05, 0xc8 },
};
static const struct regmap_config adv7533_cec_regmap_config = {
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 52629b62b002..a7ac41fcc44e 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -30,6 +30,11 @@
#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
+#ifdef CONFIG_DRM_CMA_FBDEV_BUFFER_NUM
+#define FBDEV_BUFFER_NUM CONFIG_DRM_CMA_FBDEV_BUFFER_NUM
+#else
+#define FBDEV_BUFFER_NUM 1
+#endif
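+
+/*
+ * The fbdev surface is allocated FBDEV_BUFFER_NUM times taller than the
+ * visible mode (see drm_fbdev_cma_create_with_funcs below), so userspace
+ * can pan/flip between that many full frames for double/triple buffering.
+ */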
struct drm_fb_cma {
struct drm_framebuffer fb;
@@ -438,7 +443,7 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
+ mode_cmd.height = sizes->surface_height * FBDEV_BUFFER_NUM;
mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
index 558c61b1b8e8..b03095bbf01f 100644
--- a/drivers/gpu/drm/hisilicon/Kconfig
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -3,3 +3,5 @@
# Please keep this list sorted alphabetically
source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
+
+source "drivers/gpu/drm/hisilicon/kirin960/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
index e3f6d493c996..5c0fee460c6d 100644
--- a/drivers/gpu/drm/hisilicon/Makefile
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -3,3 +3,4 @@
# Please keep this list sorted alphabetically
obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
+obj-$(CONFIG_DRM_KIRIN_960) += kirin960/
diff --git a/drivers/gpu/drm/hisilicon/kirin960/Kconfig b/drivers/gpu/drm/hisilicon/kirin960/Kconfig
new file mode 100644
index 000000000000..5ac1f65724fb
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/Kconfig
@@ -0,0 +1,39 @@
+config DRM_HISI_KIRIN
+ tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
+ depends on DRM && OF && ARM64
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select HISI_KIRIN_DW_DSI
+ help
+	  Choose this option if you have a HiSilicon Kirin chipset (hi6220).
+ If M is selected the module will be called kirin-drm.
+
+config DRM_KIRIN_960
+ tristate "DRM Support for Hisilicon Kirin960 series SoCs Platform"
+ depends on DRM && OF && ARM64
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select HISI_KIRIN_DW_DSI
+ help
+	  Choose this option if you have a HiSilicon Kirin chipset (kirin960).
+ If M is selected the module will be called kirin-drm.
+
+config HISI_KIRIN_DW_DSI
+ tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
+ depends on DRM_HISI_KIRIN || DRM_KIRIN_960
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ help
+ This selects support for HiSilicon Kirin SoC specific extensions for
+ the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
+	  a hi6220 based SoC, you should select this option.
+
+config DRM_PANEL_HIKEY960_NTE300NTS
+ tristate "Hikey960 NTE300NTS video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+	  Say Y here if you want to enable the LCD panel driver for the
+	  HiKey960 board. Currently supported panel: NTE300NTS (1920x1200)
diff --git a/drivers/gpu/drm/hisilicon/kirin960/Makefile b/drivers/gpu/drm/hisilicon/kirin960/Makefile
new file mode 100644
index 000000000000..42d1ed179264
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/Makefile
@@ -0,0 +1,12 @@
+EXTRA_CFLAGS += \
+ -Iinclude/drm
+
+kirin-drm-y := kirin_fbdev.o \
+ kirin_fb.o \
+ kirin_drm_drv.o \
+ kirin_drm_dss.o \
+ kirin_drm_dpe_utils.o \
+ kirin_drm_overlay_utils.o \
+
+obj-$(CONFIG_DRM_KIRIN_960) += kirin-drm.o
+obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin960/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin960/dw_drm_dsi.c
new file mode 100644
index 000000000000..db408beb33ec
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/dw_drm_dsi.c
@@ -0,0 +1,1649 @@
+/*
+ * DesignWare MIPI DSI Host Controller v1.02 driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * <shizongxuan@huawei.com>
+ * <zhangxiubin@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/iopoll.h>
+#include <video/mipi_display.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_address.h>
+
+#include <drm/drm_of.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_panel.h>
+
+#include "dw_dsi_reg.h"
+#include "kirin_dpe_reg.h"
+#include "kirin_drm_dpe_utils.h"
+
+#define DTS_COMP_DSI_NAME "hisilicon,hi3660-dsi"
+
+#define ROUND(x, y) ((x) / (y) + \
+ ((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
+#define ROUND1(x, y) ((x) / (y) + ((x) % (y) ? 1 : 0))
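+/*
+ * ROUND(x, y) divides and rounds to the nearest integer (half up, judged from
+ * the first decimal digit of the remainder); ROUND1(x, y) is a plain ceiling
+ * division.  Both assume positive integer operands.
+ */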
+#define PHY_REF_CLK_RATE 19200000
+#define PHY_REF_CLK_PERIOD_PS (1000000000 / (PHY_REF_CLK_RATE / 1000))
+
+#define encoder_to_dsi(encoder) \
+ container_of(encoder, struct dw_dsi, encoder)
+#define host_to_dsi(host) \
+ container_of(host, struct dw_dsi, host)
+#define connector_to_dsi(connector) \
+ container_of(connector, struct dw_dsi, connector)
+#define DSS_REDUCE(x) ((x) > 0 ? ((x) - 1) : (x))
+
+enum dsi_output_client {
+ OUT_HDMI = 0,
+ OUT_PANEL,
+ OUT_MAX
+};
+
+struct mipi_phy_params {
+ u64 lane_byte_clk;
+ u32 clk_division;
+
+ u32 clk_lane_lp2hs_time;
+ u32 clk_lane_hs2lp_time;
+ u32 data_lane_lp2hs_time;
+ u32 data_lane_hs2lp_time;
+ u32 clk2data_delay;
+ u32 data2clk_delay;
+
+ u32 clk_pre_delay;
+ u32 clk_post_delay;
+ u32 clk_t_lpx;
+ u32 clk_t_hs_prepare;
+ u32 clk_t_hs_zero;
+ u32 clk_t_hs_trial;
+ u32 clk_t_wakeup;
+ u32 data_pre_delay;
+ u32 data_post_delay;
+ u32 data_t_lpx;
+ u32 data_t_hs_prepare;
+ u32 data_t_hs_zero;
+ u32 data_t_hs_trial;
+ u32 data_t_ta_go;
+ u32 data_t_ta_get;
+ u32 data_t_wakeup;
+
+ u32 phy_stop_wait_time;
+
+ u32 rg_vrefsel_vcm;
+ u32 rg_hstx_ckg_sel;
+ u32 rg_pll_fbd_div5f;
+ u32 rg_pll_fbd_div1f;
+ u32 rg_pll_fbd_2p;
+ u32 rg_pll_enbwt;
+ u32 rg_pll_fbd_p;
+ u32 rg_pll_fbd_s;
+ u32 rg_pll_pre_div1p;
+ u32 rg_pll_pre_p;
+ u32 rg_pll_vco_750m;
+ u32 rg_pll_lpf_rs;
+ u32 rg_pll_lpf_cs;
+ u32 rg_pll_enswc;
+ u32 rg_pll_chp;
+
+ u32 pll_register_override; /*0x1E[0]*/
+ u32 pll_power_down; /*0x1E[1]*/
+ u32 rg_band_sel; /*0x1E[2]*/
+ u32 rg_phase_gen_en; /*0x1E[3]*/
+ u32 reload_sel; /*0x1E[4]*/
+ u32 rg_pll_cp_p; /*0x1E[7:5]*/
+ u32 rg_pll_refsel; /*0x16[1:0]*/
+ u32 rg_pll_cp; /*0x16[7:5]*/
+ u32 load_command;
+};
+
+struct dsi_hw_ctx {
+ void __iomem *base;
+ char __iomem *peri_crg_base;
+
+ struct clk *dss_dphy0_ref_clk;
+ struct clk *dss_dphy1_ref_clk;
+ struct clk *dss_dphy0_cfg_clk;
+ struct clk *dss_dphy1_cfg_clk;
+ struct clk *dss_pclk_dsi0_clk;
+ struct clk *dss_pclk_dsi1_clk;
+};
+
+struct dw_dsi_client {
+ u32 lanes;
+ u32 phy_clock; /* in kHz */
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+};
+
+struct mipi_panel_info {
+ u8 dsi_version;
+ u8 vc;
+ u8 lane_nums;
+ u8 lane_nums_select_support;
+ u8 color_mode;
+ u32 dsi_bit_clk; /* clock lane(p/n) */
+ u32 burst_mode;
+ u32 max_tx_esc_clk;
+ u8 non_continue_en;
+
+ u32 dsi_bit_clk_val1;
+ u32 dsi_bit_clk_val2;
+ u32 dsi_bit_clk_val3;
+ u32 dsi_bit_clk_val4;
+ u32 dsi_bit_clk_val5;
+ u32 dsi_bit_clk_upt;
+ /*uint32_t dsi_pclk_rate;*/
+
+ u32 hs_wr_to_time;
+
+ /* dphy config parameter adjust*/
+ u32 clk_post_adjust;
+ u32 clk_pre_adjust;
+ u32 clk_pre_delay_adjust;
+ u32 clk_t_hs_exit_adjust;
+ u32 clk_t_hs_trial_adjust;
+ u32 clk_t_hs_prepare_adjust;
+ int clk_t_lpx_adjust;
+ u32 clk_t_hs_zero_adjust;
+ u32 data_post_delay_adjust;
+ int data_t_lpx_adjust;
+ u32 data_t_hs_prepare_adjust;
+ u32 data_t_hs_zero_adjust;
+ u32 data_t_hs_trial_adjust;
+ u32 rg_vrefsel_vcm_adjust;
+
+ /*only for Chicago<3660> use*/
+ u32 rg_vrefsel_vcm_clk_adjust;
+ u32 rg_vrefsel_vcm_data_adjust;
+};
+
+struct ldi_panel_info {
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 h_pulse_width;
+
+	/*
+	** note: vbp must be > 8 when overlay composition is used;
+	** the LCD also requires vbp > 8 during its power-on sequence
+	*/
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 v_pulse_width;
+
+ u8 hsync_plr;
+ u8 vsync_plr;
+ u8 pixelclk_plr;
+ u8 data_en_plr;
+
+ /* for cabc */
+ u8 dpi0_overlap_size;
+ u8 dpi1_overlap_size;
+};
+
+struct dw_dsi {
+ struct drm_encoder encoder;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct mipi_dsi_host host;
+ struct drm_connector connector; /* connector for panel */
+ struct drm_display_mode cur_mode;
+ struct dsi_hw_ctx *ctx;
+ struct mipi_phy_params phy;
+ struct mipi_panel_info mipi;
+ struct ldi_panel_info ldi;
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+ struct gpio_desc *gpio_mux;
+ struct dw_dsi_client client[OUT_MAX];
+ enum dsi_output_client cur_client;
+ bool enable;
+};
+
+struct dsi_data {
+ struct dw_dsi dsi;
+ struct dsi_hw_ctx ctx;
+};
+
+struct dsi_phy_range {
+ u32 min_range_kHz;
+ u32 max_range_kHz;
+ u32 pll_vco_750M;
+ u32 hstx_ckg_sel;
+};
+
+static const struct dsi_phy_range dphy_range_info[] = {
+ { 46875, 62500, 1, 7 },
+ { 62500, 93750, 0, 7 },
+ { 93750, 125000, 1, 6 },
+ { 125000, 187500, 0, 6 },
+ { 187500, 250000, 1, 5 },
+ { 250000, 375000, 0, 5 },
+ { 375000, 500000, 1, 4 },
+ { 500000, 750000, 0, 4 },
+ { 750000, 1000000, 1, 0 },
+ { 1000000, 1500000, 0, 0 }
+};
+
+void dsi_set_output_client(struct drm_device *dev)
+{
+ enum dsi_output_client client;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct dw_dsi *dsi;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* find dsi encoder */
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DSI)
+ break;
+ dsi = encoder_to_dsi(encoder);
+
+ /* find HDMI connector */
+ drm_for_each_connector(connector, dev)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)
+ break;
+
+ /*
+ * set the proper dsi output client
+ */
+ client = connector->status == connector_status_connected ?
+ OUT_HDMI : OUT_PANEL;
+ if (client != dsi->cur_client) {
+ /* associate bridge and dsi encoder */
+ if (client == OUT_HDMI)
+ encoder->bridge = dsi->bridge;
+ else
+ encoder->bridge = NULL;
+
+ gpiod_set_value_cansleep(dsi->gpio_mux, client);
+ dsi->cur_client = client;
+ /* let the userspace know panel connector status has changed */
+ drm_sysfs_hotplug_event(dev);
+ DRM_INFO("client change to %s\n", client == OUT_HDMI ?
+ "HDMI" : "panel");
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(dsi_set_output_client);
+
+static void get_dsi_phy_ctrl(struct dw_dsi *dsi,
+ struct mipi_phy_params *phy_ctrl)
+{
+ struct mipi_panel_info *mipi = NULL;
+ struct drm_display_mode *mode = NULL;
+ u32 dphy_req_kHz;
+ int bpp;
+ u32 id = 0;
+ u32 ui = 0;
+ u32 m_pll = 0;
+ u32 n_pll = 0;
+ u32 m_n_fract = 0;
+ u32 m_n_int = 0;
+ u64 lane_clock = 0;
+ u64 vco_div = 1;
+
+ u32 accuracy = 0;
+ u32 unit_tx_byte_clk_hs = 0;
+ u32 clk_post = 0;
+ u32 clk_pre = 0;
+ u32 clk_t_hs_exit = 0;
+ u32 clk_pre_delay = 0;
+ u32 clk_t_hs_prepare = 0;
+ u32 clk_t_lpx = 0;
+ u32 clk_t_hs_zero = 0;
+ u32 clk_t_hs_trial = 0;
+ u32 data_post_delay = 0;
+ u32 data_t_hs_prepare = 0;
+ u32 data_t_hs_zero = 0;
+ u32 data_t_hs_trial = 0;
+ u32 data_t_lpx = 0;
+ u32 clk_pre_delay_reality = 0;
+ u32 clk_t_hs_zero_reality = 0;
+ u32 clk_post_delay_reality = 0;
+ u32 data_t_hs_zero_reality = 0;
+ u32 data_post_delay_reality = 0;
+ u32 data_pre_delay_reality = 0;
+
+ WARN_ON(!phy_ctrl);
+ WARN_ON(!dsi);
+
+ id = dsi->cur_client;
+ mode = &dsi->cur_mode;
+ mipi = &dsi->mipi;
+
+ /*
+ * count phy params
+ */
+ bpp = mipi_dsi_pixel_format_to_bpp(dsi->client[id].format);
+ if (bpp < 0)
+ return;
+ if (mode->clock > 80000)
+ dsi->client[id].lanes = 4;
+ else
+ dsi->client[id].lanes = 3;
+ if (dsi->client[id].phy_clock)
+ dphy_req_kHz = dsi->client[id].phy_clock;
+ else
+ dphy_req_kHz = mode->clock * bpp / dsi->client[id].lanes;
+
+ lane_clock = dphy_req_kHz / 1000;
+ DRM_INFO("Expected : lane_clock = %llu M\n", lane_clock);
+
+ /************************ PLL parameters config *********************/
+	/* chip spec:
+	 * If the output data rate is below 320 Mbps,
+	 * RG_BAND_SEL should be set to 1.
+	 * In this mode a post divider of 1/4 is applied to the VCO.
+	 */
+ if ((320 <= lane_clock) && (lane_clock <= 2500)) {
+ phy_ctrl->rg_band_sel = 0; /*0x1E[2]*/
+ vco_div = 1;
+ } else if ((80 <= lane_clock) && (lane_clock < 320)) {
+ phy_ctrl->rg_band_sel = 1;
+ vco_div = 4;
+ } else {
+ DRM_ERROR("80M <= lane_clock< = 2500M, not support lane_clock = %llu M\n",
+ lane_clock);
+ }
+
+ m_n_int = lane_clock * vco_div * 1000000UL / DEFAULT_MIPI_CLK_RATE;
+ m_n_fract = ((lane_clock * vco_div * 1000000UL * 1000UL / DEFAULT_MIPI_CLK_RATE) % 1000) * 10 / 1000;
+
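+	/*
+	 * m_n_int/m_n_fract are the integer part and first decimal digit of the
+	 * required VCO multiplication ratio (lane_clock * vco_div over the PHY
+	 * reference clock, presumably DEFAULT_MIPI_CLK_RATE, cf. PHY_REF_CLK_RATE
+	 * above).  The branches below choose a pre-divider n_pll and feedback
+	 * value m_pll whose ratio best approximates that target.
+	 */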
+ if (m_n_int % 2 == 0) {
+ if (m_n_fract * 6 >= 50) {
+ n_pll = 2;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n_fract * 6 >= 30) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 2;
+ } else {
+ n_pll = 1;
+ m_pll = m_n_int * n_pll;
+ }
+ } else {
+ if (m_n_fract * 6 >= 50) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n_fract * 6 >= 30) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n_fract * 6 >= 10) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 1;
+ } else {
+ n_pll = 2;
+ m_pll = m_n_int * n_pll;
+ }
+ }
+
+ /*if set rg_pll_enswc=1, rg_pll_fbd_s can't be 0*/
+ if (m_pll <= 8) {
+ phy_ctrl->rg_pll_fbd_s = 1;
+ phy_ctrl->rg_pll_enswc = 0;
+
+ if (m_pll % 2 == 0) {
+ phy_ctrl->rg_pll_fbd_p = m_pll / 2;
+ } else {
+ if (n_pll == 1) {
+ n_pll *= 2;
+ phy_ctrl->rg_pll_fbd_p = (m_pll * 2) / 2;
+ } else {
+ DRM_ERROR("phy m_pll not support!m_pll = %d\n", m_pll);
+ return;
+ }
+ }
+ } else if (m_pll <= 300) {
+ if (m_pll % 2 == 0)
+ phy_ctrl->rg_pll_enswc = 0;
+ else
+ phy_ctrl->rg_pll_enswc = 1;
+
+ phy_ctrl->rg_pll_fbd_s = 1;
+ phy_ctrl->rg_pll_fbd_p = m_pll / 2;
+ } else if (m_pll <= 315) {
+ phy_ctrl->rg_pll_fbd_p = 150;
+ phy_ctrl->rg_pll_fbd_s = m_pll - 2 * phy_ctrl->rg_pll_fbd_p;
+ phy_ctrl->rg_pll_enswc = 1;
+ } else {
+ DRM_ERROR("phy m_pll not support!m_pll = %d\n", m_pll);
+ return;
+ }
+
+ phy_ctrl->rg_pll_pre_p = n_pll;
+
+ lane_clock = m_pll * (DEFAULT_MIPI_CLK_RATE / n_pll) / vco_div;
+ DRM_INFO("Config : lane_clock = %llu\n", lane_clock);
+
+ /*FIXME :*/
+ phy_ctrl->rg_pll_cp = 1; /*0x16[7:5]*/
+ phy_ctrl->rg_pll_cp_p = 3; /*0x1E[7:5]*/
+
+ /*test_code_0x14 other parameters config*/
+ phy_ctrl->rg_pll_enbwt = 0; /*0x14[2]*/
+ phy_ctrl->rg_pll_chp = 0; /*0x14[1:0]*/
+
+ /*test_code_0x16 other parameters config, 0x16[3:2] reserved*/
+ phy_ctrl->rg_pll_lpf_cs = 0; /*0x16[4]*/
+ phy_ctrl->rg_pll_refsel = 1; /*0x16[1:0]*/
+
+ /*test_code_0x1E other parameters config*/
+ phy_ctrl->reload_sel = 1; /*0x1E[4]*/
+ phy_ctrl->rg_phase_gen_en = 1; /*0x1E[3]*/
+ phy_ctrl->pll_power_down = 0; /*0x1E[1]*/
+ phy_ctrl->pll_register_override = 1; /*0x1E[0]*/
+
+ /*HSTX select VCM VREF*/
+ phy_ctrl->rg_vrefsel_vcm = 0x55;
+ if (mipi->rg_vrefsel_vcm_clk_adjust != 0)
+ phy_ctrl->rg_vrefsel_vcm = (phy_ctrl->rg_vrefsel_vcm & 0x0F) |
+ ((mipi->rg_vrefsel_vcm_clk_adjust & 0x0F) << 4);
+
+ if (mipi->rg_vrefsel_vcm_data_adjust != 0)
+ phy_ctrl->rg_vrefsel_vcm = (phy_ctrl->rg_vrefsel_vcm & 0xF0) |
+ (mipi->rg_vrefsel_vcm_data_adjust & 0x0F);
+
+ /*if reload_sel = 1, need to set load_command*/
+ phy_ctrl->load_command = 0x5A;
+
+ /******************** clock/data lane parameters config ******************/
+ accuracy = 10;
+ ui = 10 * 1000000000UL * accuracy / lane_clock;
+ /*unit of measurement*/
+ unit_tx_byte_clk_hs = 8 * ui;
+
+ /* D-PHY Specification : 60ns + 52*UI <= clk_post*/
+ clk_post = 600 * accuracy + 52 * ui + mipi->clk_post_adjust * ui;
+
+ /* D-PHY Specification : clk_pre >= 8*UI*/
+ clk_pre = 8 * ui + mipi->clk_pre_adjust * ui;
+
+ /* D-PHY Specification : clk_t_hs_exit >= 100ns*/
+ clk_t_hs_exit = 1000 * accuracy + mipi->clk_t_hs_exit_adjust * ui;
+
+ /* clocked by TXBYTECLKHS*/
+ clk_pre_delay = 0 + mipi->clk_pre_delay_adjust * ui;
+
+ /* D-PHY Specification : clk_t_hs_trial >= 60ns*/
+ /* clocked by TXBYTECLKHS*/
+ clk_t_hs_trial = 600 * accuracy + 3 * unit_tx_byte_clk_hs + mipi->clk_t_hs_trial_adjust * ui;
+
+ /* D-PHY Specification : 38ns <= clk_t_hs_prepare <= 95ns*/
+ /* clocked by TXBYTECLKHS*/
+ if (mipi->clk_t_hs_prepare_adjust == 0)
+ mipi->clk_t_hs_prepare_adjust = 43;
+
+ clk_t_hs_prepare = ((380 * accuracy + mipi->clk_t_hs_prepare_adjust * ui) <= (950 * accuracy - 8 * ui)) ?
+ (380 * accuracy + mipi->clk_t_hs_prepare_adjust * ui) : (950 * accuracy - 8 * ui);
+
+ /* clocked by TXBYTECLKHS*/
+ data_post_delay = 0 + mipi->data_post_delay_adjust * ui;
+
+ /* D-PHY Specification : data_t_hs_trial >= max( n*8*UI, 60ns + n*4*UI ), n = 1*/
+ /* clocked by TXBYTECLKHS*/
+ data_t_hs_trial = ((600 * accuracy + 4 * ui) >= (8 * ui) ? (600 * accuracy + 4 * ui) : (8 * ui)) + 8 * ui +
+ 3 * unit_tx_byte_clk_hs + mipi->data_t_hs_trial_adjust * ui;
+
+ /* D-PHY Specification : 40ns + 4*UI <= data_t_hs_prepare <= 85ns + 6*UI*/
+ /* clocked by TXBYTECLKHS*/
+ if (mipi->data_t_hs_prepare_adjust == 0)
+ mipi->data_t_hs_prepare_adjust = 35;
+
+ data_t_hs_prepare = ((400 * accuracy + 4 * ui + mipi->data_t_hs_prepare_adjust * ui) <= (850 * accuracy + 6 * ui - 8 * ui)) ?
+ (400 * accuracy + 4 * ui + mipi->data_t_hs_prepare_adjust * ui) : (850 * accuracy + 6 * ui - 8 * ui);
+
+ /* D-PHY chip spec : clk_t_lpx + clk_t_hs_prepare > 200ns*/
+ /* D-PHY Specification : clk_t_lpx >= 50ns*/
+ /* clocked by TXBYTECLKHS*/
+ clk_t_lpx = (((2000 * accuracy - clk_t_hs_prepare) >= 500 * accuracy) ?
+ ((2000 * accuracy - clk_t_hs_prepare)) : (500 * accuracy)) +
+ mipi->clk_t_lpx_adjust * ui;
+
+ /* D-PHY Specification : clk_t_hs_zero + clk_t_hs_prepare >= 300 ns*/
+ /* clocked by TXBYTECLKHS*/
+ clk_t_hs_zero = 3000 * accuracy - clk_t_hs_prepare + 3 * unit_tx_byte_clk_hs + mipi->clk_t_hs_zero_adjust * ui;
+
+ /* D-PHY chip spec : data_t_lpx + data_t_hs_prepare > 200ns*/
+ /* D-PHY Specification : data_t_lpx >= 50ns*/
+ /* clocked by TXBYTECLKHS*/
+ data_t_lpx = clk_t_lpx + mipi->data_t_lpx_adjust * ui; /*2000 * accuracy - data_t_hs_prepare;*/
+
+ /* D-PHY Specification : data_t_hs_zero + data_t_hs_prepare >= 145ns + 10*UI*/
+ /* clocked by TXBYTECLKHS*/
+ data_t_hs_zero = 1450 * accuracy + 10 * ui - data_t_hs_prepare +
+ 3 * unit_tx_byte_clk_hs + mipi->data_t_hs_zero_adjust * ui;
+
+ phy_ctrl->clk_pre_delay = ROUND1(clk_pre_delay, unit_tx_byte_clk_hs);
+ phy_ctrl->clk_t_hs_prepare = ROUND1(clk_t_hs_prepare, unit_tx_byte_clk_hs);
+ phy_ctrl->clk_t_lpx = ROUND1(clk_t_lpx, unit_tx_byte_clk_hs);
+ phy_ctrl->clk_t_hs_zero = ROUND1(clk_t_hs_zero, unit_tx_byte_clk_hs);
+ phy_ctrl->clk_t_hs_trial = ROUND1(clk_t_hs_trial, unit_tx_byte_clk_hs);
+
+ phy_ctrl->data_post_delay = ROUND1(data_post_delay, unit_tx_byte_clk_hs);
+ phy_ctrl->data_t_hs_prepare = ROUND1(data_t_hs_prepare, unit_tx_byte_clk_hs);
+ phy_ctrl->data_t_lpx = ROUND1(data_t_lpx, unit_tx_byte_clk_hs);
+ phy_ctrl->data_t_hs_zero = ROUND1(data_t_hs_zero, unit_tx_byte_clk_hs);
+ phy_ctrl->data_t_hs_trial = ROUND1(data_t_hs_trial, unit_tx_byte_clk_hs);
+ phy_ctrl->data_t_ta_go = 4;
+ phy_ctrl->data_t_ta_get = 5;
+
+ clk_pre_delay_reality = phy_ctrl->clk_pre_delay + 2;
+ clk_t_hs_zero_reality = phy_ctrl->clk_t_hs_zero + 8;
+ data_t_hs_zero_reality = phy_ctrl->data_t_hs_zero + 4;
+ data_post_delay_reality = phy_ctrl->data_post_delay + 4;
+
+ phy_ctrl->clk_post_delay = phy_ctrl->data_t_hs_trial + ROUND1(clk_post, unit_tx_byte_clk_hs);
+ phy_ctrl->data_pre_delay = clk_pre_delay_reality + phy_ctrl->clk_t_lpx +
+ phy_ctrl->clk_t_hs_prepare + clk_t_hs_zero_reality + ROUND1(clk_pre, unit_tx_byte_clk_hs) ;
+
+ clk_post_delay_reality = phy_ctrl->clk_post_delay + 4;
+ data_pre_delay_reality = phy_ctrl->data_pre_delay + 2;
+
+ phy_ctrl->clk_lane_lp2hs_time = clk_pre_delay_reality + phy_ctrl->clk_t_lpx +
+ phy_ctrl->clk_t_hs_prepare + clk_t_hs_zero_reality + 3;
+ phy_ctrl->clk_lane_hs2lp_time = clk_post_delay_reality + phy_ctrl->clk_t_hs_trial + 3;
+ phy_ctrl->data_lane_lp2hs_time = data_pre_delay_reality + phy_ctrl->data_t_lpx +
+ phy_ctrl->data_t_hs_prepare + data_t_hs_zero_reality + 3;
+ phy_ctrl->data_lane_hs2lp_time = data_post_delay_reality + phy_ctrl->data_t_hs_trial + 3;
+ phy_ctrl->phy_stop_wait_time = clk_post_delay_reality +
+ phy_ctrl->clk_t_hs_trial + ROUND1(clk_t_hs_exit, unit_tx_byte_clk_hs) -
+ (data_post_delay_reality + phy_ctrl->data_t_hs_trial) + 3;
+
+ phy_ctrl->lane_byte_clk = lane_clock / 8;
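+
+	/*
+	 * clk_division is a ceiling division: the smallest divider for which
+	 * (lane_byte_clk / 2) / clk_division does not exceed max_tx_esc_clk.
+	 */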
+ phy_ctrl->clk_division = (((phy_ctrl->lane_byte_clk / 2) % mipi->max_tx_esc_clk) > 0) ?
+ (phy_ctrl->lane_byte_clk / 2 / mipi->max_tx_esc_clk + 1) :
+ (phy_ctrl->lane_byte_clk / 2 / mipi->max_tx_esc_clk);
+
+ DRM_INFO("PHY clock_lane and data_lane config : \n"
+ "rg_vrefsel_vcm=%u\n"
+ "clk_pre_delay=%u\n"
+ "clk_post_delay=%u\n"
+ "clk_t_hs_prepare=%u\n"
+ "clk_t_lpx=%u\n"
+ "clk_t_hs_zero=%u\n"
+ "clk_t_hs_trial=%u\n"
+ "data_pre_delay=%u\n"
+ "data_post_delay=%u\n"
+ "data_t_hs_prepare=%u\n"
+ "data_t_lpx=%u\n"
+ "data_t_hs_zero=%u\n"
+ "data_t_hs_trial=%u\n"
+ "data_t_ta_go=%u\n"
+ "data_t_ta_get=%u\n",
+ phy_ctrl->rg_vrefsel_vcm,
+ phy_ctrl->clk_pre_delay,
+ phy_ctrl->clk_post_delay,
+ phy_ctrl->clk_t_hs_prepare,
+ phy_ctrl->clk_t_lpx,
+ phy_ctrl->clk_t_hs_zero,
+ phy_ctrl->clk_t_hs_trial,
+ phy_ctrl->data_pre_delay,
+ phy_ctrl->data_post_delay,
+ phy_ctrl->data_t_hs_prepare,
+ phy_ctrl->data_t_lpx,
+ phy_ctrl->data_t_hs_zero,
+ phy_ctrl->data_t_hs_trial,
+ phy_ctrl->data_t_ta_go,
+ phy_ctrl->data_t_ta_get);
+ DRM_INFO("clk_lane_lp2hs_time=%u\n"
+ "clk_lane_hs2lp_time=%u\n"
+ "data_lane_lp2hs_time=%u\n"
+ "data_lane_hs2lp_time=%u\n"
+ "phy_stop_wait_time=%u\n",
+ phy_ctrl->clk_lane_lp2hs_time,
+ phy_ctrl->clk_lane_hs2lp_time,
+ phy_ctrl->data_lane_lp2hs_time,
+ phy_ctrl->data_lane_hs2lp_time,
+ phy_ctrl->phy_stop_wait_time);
+}
+
+static void dw_dsi_set_mode(struct dw_dsi *dsi, enum dsi_work_mode mode)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ void __iomem *base = ctx->base;
+
+ writel(RESET, base + PWR_UP);
+ writel(mode, base + MODE_CFG);
+ writel(POWERUP, base + PWR_UP);
+}
+
+static void dsi_set_burst_mode(void __iomem *base, unsigned long flags)
+{
+ u32 val;
+ u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
+
+ /*
+ * choose video mode type
+ */
+ if ((flags & mode_mask) == non_burst_sync_pulse)
+ val = DSI_NON_BURST_SYNC_PULSES;
+ else if ((flags & mode_mask) == non_burst_sync_event)
+ val = DSI_NON_BURST_SYNC_EVENTS;
+ else
+ val = DSI_BURST_SYNC_PULSES_1;
+
+ set_reg(base + MIPIDSI_VID_MODE_CFG_OFFSET, val, 2, 0);
+}
+
+/*
+ * D-PHY test register write: the register address is written to
+ * PHY_TST_CTRL1 with bit 16 set to mark the address phase, then the value
+ * with bit 16 clear; each is latched by pulsing bit 1 of PHY_TST_CTRL0.
+ */
+static void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
+{
+ u32 reg_write = 0x10000 + reg;
+
+ /*
+ * latch reg first
+ */
+ writel(reg_write, base + MIPIDSI_PHY_TST_CTRL1_OFFSET);
+ writel(0x02, base + MIPIDSI_PHY_TST_CTRL0_OFFSET);
+ writel(0x00, base + MIPIDSI_PHY_TST_CTRL0_OFFSET);
+
+ /*
+ * then latch value
+ */
+ writel(val, base + MIPIDSI_PHY_TST_CTRL1_OFFSET);
+ writel(0x02, base + MIPIDSI_PHY_TST_CTRL0_OFFSET);
+ writel(0x00, base + MIPIDSI_PHY_TST_CTRL0_OFFSET);
+}
+
+static void dsi_mipi_init(struct dw_dsi *dsi, char __iomem *mipi_dsi_base)
+{
+ u32 hline_time = 0;
+ u32 hsa_time = 0;
+ u32 hbp_time = 0;
+ u64 pixel_clk = 0;
+ u32 i = 0;
+ u32 id = 0;
+ unsigned long dw_jiffies = 0;
+ u32 tmp = 0;
+ bool is_ready = false;
+ struct mipi_panel_info *mipi = NULL;
+ dss_rect_t rect;
+ u32 cmp_stopstate_val = 0;
+ u32 lanes;
+
+ WARN_ON(!dsi);
+ WARN_ON(!mipi_dsi_base);
+
+ id = dsi->cur_client;
+ mipi = &dsi->mipi;
+
+ if (mipi->max_tx_esc_clk == 0) {
+ DRM_INFO("max_tx_esc_clk is invalid!");
+ mipi->max_tx_esc_clk = DEFAULT_MAX_TX_ESC_CLK;
+ }
+
+ memset(&dsi->phy, 0, sizeof(struct mipi_phy_params));
+ get_dsi_phy_ctrl(dsi, &dsi->phy);
+
+ rect.x = 0;
+ rect.y = 0;
+ rect.w = dsi->cur_mode.hdisplay;
+ rect.h = dsi->cur_mode.vdisplay;
+ lanes = dsi->client[id].lanes - 1;
+ /***************Configure the DPHY start**************/
+
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_IF_CFG_OFFSET, lanes, 2, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_CLKMGR_CFG_OFFSET, dsi->phy.clk_division, 8, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_CLKMGR_CFG_OFFSET, dsi->phy.clk_division, 8, 8);
+
+ outp32(mipi_dsi_base + MIPIDSI_PHY_RSTZ_OFFSET, 0x00000000);
+
+ outp32(mipi_dsi_base + MIPIDSI_PHY_TST_CTRL0_OFFSET, 0x00000000);
+ outp32(mipi_dsi_base + MIPIDSI_PHY_TST_CTRL0_OFFSET, 0x00000001);
+ outp32(mipi_dsi_base + MIPIDSI_PHY_TST_CTRL0_OFFSET, 0x00000000);
+
+ /* physical configuration PLL I*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x14,
+ (dsi->phy.rg_pll_fbd_s << 4) + (dsi->phy.rg_pll_enswc << 3) +
+ (dsi->phy.rg_pll_enbwt << 2) + dsi->phy.rg_pll_chp);
+
+ /* physical configuration PLL II, M*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x15, dsi->phy.rg_pll_fbd_p);
+
+ /* physical configuration PLL III*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x16,
+ (dsi->phy.rg_pll_cp << 5) + (dsi->phy.rg_pll_lpf_cs << 4) +
+ dsi->phy.rg_pll_refsel);
+
+ /* physical configuration PLL IV, N*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x17, dsi->phy.rg_pll_pre_p);
+
+ /* sets the analog characteristic of V reference in D-PHY TX*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x1D, dsi->phy.rg_vrefsel_vcm);
+
+ /* MISC AFE Configuration*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x1E,
+ (dsi->phy.rg_pll_cp_p << 5) + (dsi->phy.reload_sel << 4) +
+ (dsi->phy.rg_phase_gen_en << 3) + (dsi->phy.rg_band_sel << 2) +
+ (dsi->phy.pll_power_down << 1) + dsi->phy.pll_register_override);
+
+ /*reload_command*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x1F, dsi->phy.load_command);
+
+ /* pre_delay of clock lane request setting*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x20, DSS_REDUCE(dsi->phy.clk_pre_delay));
+
+ /* post_delay of clock lane request setting*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x21, DSS_REDUCE(dsi->phy.clk_post_delay));
+
+ /* clock lane timing ctrl - t_lpx*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x22, DSS_REDUCE(dsi->phy.clk_t_lpx));
+
+ /* clock lane timing ctrl - t_hs_prepare*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x23, DSS_REDUCE(dsi->phy.clk_t_hs_prepare));
+
+ /* clock lane timing ctrl - t_hs_zero*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x24, DSS_REDUCE(dsi->phy.clk_t_hs_zero));
+
+ /* clock lane timing ctrl - t_hs_trial*/
+ dsi_phy_tst_set(mipi_dsi_base, 0x25, dsi->phy.clk_t_hs_trial);
+
+ for (i = 0; i <= lanes; i++) {
+ /* data lane pre_delay*/
+ tmp = 0x30 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_pre_delay));
+
+ /*data lane post_delay*/
+ tmp = 0x31 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_post_delay));
+
+		/* data lane timing ctrl - t_lpx*/
+		tmp = 0x32 + (i << 4);
+		dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_lpx));
+
+ /* data lane timing ctrl - t_hs_prepare*/
+ tmp = 0x33 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_hs_prepare));
+
+ /* data lane timing ctrl - t_hs_zero*/
+ tmp = 0x34 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_hs_zero));
+
+ /* data lane timing ctrl - t_hs_trial*/
+ tmp = 0x35 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_hs_trial));
+
+ /* data lane timing ctrl - t_ta_go*/
+ tmp = 0x36 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_ta_go));
+
+ /* data lane timing ctrl - t_ta_get*/
+ tmp = 0x37 + (i << 4);
+ dsi_phy_tst_set(mipi_dsi_base, tmp, DSS_REDUCE(dsi->phy.data_t_ta_get));
+ }
+
+ outp32(mipi_dsi_base + MIPIDSI_PHY_RSTZ_OFFSET, 0x00000007);
+
+ is_ready = false;
+ dw_jiffies = jiffies + HZ / 2;
+ do {
+ tmp = inp32(mipi_dsi_base + MIPIDSI_PHY_STATUS_OFFSET);
+ if ((tmp & 0x00000001) == 0x00000001) {
+ is_ready = true;
+ break;
+ }
+ } while (time_after(dw_jiffies, jiffies));
+
+ if (!is_ready) {
+ DRM_INFO("phylock is not ready!MIPIDSI_PHY_STATUS_OFFSET=0x%x.\n",
+ tmp);
+ }
+
+ if (lanes >= DSI_4_LANES)
+ cmp_stopstate_val = (BIT(4) | BIT(7) | BIT(9) | BIT(11));
+ else if (lanes >= DSI_3_LANES)
+ cmp_stopstate_val = (BIT(4) | BIT(7) | BIT(9));
+ else if (lanes >= DSI_2_LANES)
+ cmp_stopstate_val = (BIT(4) | BIT(7));
+ else
+ cmp_stopstate_val = (BIT(4));
+
+ is_ready = false;
+ dw_jiffies = jiffies + HZ / 2;
+ do {
+ tmp = inp32(mipi_dsi_base + MIPIDSI_PHY_STATUS_OFFSET);
+ if ((tmp & cmp_stopstate_val) == cmp_stopstate_val) {
+ is_ready = true;
+ break;
+ }
+ } while (time_after(dw_jiffies, jiffies));
+
+ if (!is_ready) {
+ DRM_INFO("phystopstateclklane is not ready! MIPIDSI_PHY_STATUS_OFFSET=0x%x.\n",
+ tmp);
+ }
+
+ /*************************Configure the DPHY end*************************/
+
+ /* phy_stop_wait_time*/
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_IF_CFG_OFFSET, dsi->phy.phy_stop_wait_time, 8, 8);
+
+ /*--------------configuring the DPI packet transmission----------------*/
+ /*
+ ** 2. Configure the DPI Interface:
+ ** This defines how the DPI interface interacts with the controller.
+ */
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_VCID_OFFSET, mipi->vc, 2, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_COLOR_CODING_OFFSET, mipi->color_mode, 4, 0);
+
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_CFG_POL_OFFSET, dsi->ldi.data_en_plr, 1, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_CFG_POL_OFFSET, dsi->ldi.vsync_plr, 1, 1);
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_CFG_POL_OFFSET, dsi->ldi.hsync_plr, 1, 2);
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_CFG_POL_OFFSET, 0x0, 1, 3);
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_CFG_POL_OFFSET, 0x0, 1, 4);
+
+ /*
+ ** 3. Select the Video Transmission Mode:
+ ** This defines how the processor requires the video line to be
+ ** transported through the DSI link.
+ */
+ /* video mode: low power mode*/
+ set_reg(mipi_dsi_base + MIPIDSI_VID_MODE_CFG_OFFSET, 0x3f, 6, 8);
+ /* set_reg(mipi_dsi_base + MIPIDSI_VID_MODE_CFG_OFFSET, 0x0, 1, 14); */
+
+ /* TODO: fix blank display bug when set backlight*/
+ set_reg(mipi_dsi_base + MIPIDSI_DPI_LP_CMD_TIM_OFFSET, 0x4, 8, 16);
+ /* video mode: send read cmd by lp mode*/
+ set_reg(mipi_dsi_base + MIPIDSI_VID_MODE_CFG_OFFSET, 0x1, 1, 15);
+
+ set_reg(mipi_dsi_base + MIPIDSI_VID_PKT_SIZE_OFFSET, rect.w, 14, 0);
+
+ /* burst mode*/
+ dsi_set_burst_mode(mipi_dsi_base, dsi->client[id].mode_flags);
+ /* for dsi read, BTA enable*/
+ set_reg(mipi_dsi_base + MIPIDSI_PCKHDL_CFG_OFFSET, 0x1, 1, 2);
+
+ /*
+ ** 4. Define the DPI Horizontal timing configuration:
+ **
+ ** Hsa_time = HSA*(PCLK period/Clk Lane Byte Period);
+ ** Hbp_time = HBP*(PCLK period/Clk Lane Byte Period);
+ ** Hline_time = (HSA+HBP+HACT+HFP)*(PCLK period/Clk Lane Byte Period);
+ */
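+	/*
+	** Illustrative example (hypothetical numbers, not a specific panel):
+	** for a 1920x1080@60 mode with pixel_clk = 148.5 MHz and
+	** lane_byte_clk = 112.5 MHz (900 Mbps per lane / 8), an HSA of
+	** 44 pixels maps to 44 * 112500000 / 148500000 = 33 lane-byte clocks.
+	*/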
+ pixel_clk = dsi->cur_mode.clock * 1000;
+ /*htot = dsi->cur_mode.htotal;*/
+ /*vtot = dsi->cur_mode.vtotal;*/
+ dsi->ldi.h_front_porch = dsi->cur_mode.hsync_start - dsi->cur_mode.hdisplay;
+ dsi->ldi.h_back_porch = dsi->cur_mode.htotal - dsi->cur_mode.hsync_end;
+ dsi->ldi.h_pulse_width = dsi->cur_mode.hsync_end - dsi->cur_mode.hsync_start;
+ dsi->ldi.v_front_porch = dsi->cur_mode.vsync_start - dsi->cur_mode.vdisplay;
+ dsi->ldi.v_back_porch = dsi->cur_mode.vtotal - dsi->cur_mode.vsync_end;
+ dsi->ldi.v_pulse_width = dsi->cur_mode.vsync_end - dsi->cur_mode.vsync_start;
+ if (dsi->ldi.v_pulse_width > 15) {
+ DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+ dsi->ldi.v_pulse_width = 15;
+ }
+ hsa_time = dsi->ldi.h_pulse_width * dsi->phy.lane_byte_clk / pixel_clk;
+ hbp_time = dsi->ldi.h_back_porch * dsi->phy.lane_byte_clk / pixel_clk;
+ hline_time = ROUND1((dsi->ldi.h_pulse_width + dsi->ldi.h_back_porch +
+ rect.w + dsi->ldi.h_front_porch) * dsi->phy.lane_byte_clk, pixel_clk);
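+ /*
+ * Illustrative example (values not taken from this patch): with
+ * htotal = 2200 pixel clocks, pixel_clk = 150 MHz and
+ * lane_byte_clk = 112.5 MHz, each pixel clock lasts 0.75 lane byte
+ * clocks, so hline_time = 2200 * 0.75 = 1650 lane byte clocks.
+ */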
+
+ DRM_INFO("hsa_time=%d, hbp_time=%d, hline_time=%d\n",
+ hsa_time, hbp_time, hline_time);
+ DRM_INFO("lane_byte_clk=%llu, pixel_clk=%llu\n",
+ dsi->phy.lane_byte_clk, pixel_clk);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_HSA_TIME_OFFSET, hsa_time, 12, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_HBP_TIME_OFFSET, hbp_time, 12, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_HLINE_TIME_OFFSET, hline_time, 15, 0);
+
+ /* Define the Vertical line configuration */
+ set_reg(mipi_dsi_base + MIPIDSI_VID_VSA_LINES_OFFSET, dsi->ldi.v_pulse_width, 10, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_VBP_LINES_OFFSET, dsi->ldi.v_back_porch, 10, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_VFP_LINES_OFFSET, dsi->ldi.v_front_porch, 10, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_VID_VACTIVE_LINES_OFFSET, rect.h, 14, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_TO_CNT_CFG_OFFSET, 0x7FF, 16, 0);
+
+ /* Configure core's phy parameters */
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_TMR_LPCLK_CFG_OFFSET, dsi->phy.clk_lane_lp2hs_time, 10, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_TMR_LPCLK_CFG_OFFSET, dsi->phy.clk_lane_hs2lp_time, 10, 16);
+
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_TMR_RD_CFG_OFFSET, 0x7FFF, 15, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_TMR_CFG_OFFSET, dsi->phy.data_lane_lp2hs_time, 10, 0);
+ set_reg(mipi_dsi_base + MIPIDSI_PHY_TMR_CFG_OFFSET, dsi->phy.data_lane_hs2lp_time, 10, 16);
+
+ /* Wake up the core */
+ set_reg(mipi_dsi_base + MIPIDSI_PWR_UP_OFFSET, 0x1, 1, 0);
+}
+
+static void dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ void __iomem *base = ctx->base;
+
+ if (!dsi->enable)
+ return;
+
+ dw_dsi_set_mode(dsi, DSI_COMMAND_MODE);
+ /* turn off panel's backlight */
+ if (dsi->panel && drm_panel_disable(dsi->panel))
+ DRM_ERROR("failed to disable panel\n");
+
+ /* turn off panel */
+ if (dsi->panel && drm_panel_unprepare(dsi->panel))
+ DRM_ERROR("failed to unprepare panel\n");
+
+ writel(0, base + PWR_UP);
+ writel(0, base + LPCLK_CTRL);
+ writel(0, base + PHY_RSTZ);
+ clk_disable_unprepare(ctx->dss_dphy0_ref_clk);
+ clk_disable_unprepare(ctx->dss_dphy0_cfg_clk);
+ clk_disable_unprepare(ctx->dss_pclk_dsi0_clk);
+
+ dsi->enable = false;
+}
+
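+/*
+ * First stage of the power-on sequence: initialise the D-PHY and controller,
+ * then leave the link in low-power command mode with HS clock generation
+ * disabled.
+ */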
+static int mipi_dsi_on_sub1(struct dw_dsi *dsi, char __iomem *mipi_dsi_base)
+{
+ WARN_ON(!mipi_dsi_base);
+
+ /* mipi init */
+ dsi_mipi_init(dsi, mipi_dsi_base);
+ DRM_INFO("dsi_mipi_init ok\n");
+ /* switch to cmd mode */
+ set_reg(mipi_dsi_base + MIPIDSI_MODE_CFG_OFFSET, 0x1, 1, 0);
+ /* cmd mode: low power mode */
+ set_reg(mipi_dsi_base + MIPIDSI_CMD_MODE_CFG_OFFSET, 0x7f, 7, 8);
+ set_reg(mipi_dsi_base + MIPIDSI_CMD_MODE_CFG_OFFSET, 0xf, 4, 16);
+ set_reg(mipi_dsi_base + MIPIDSI_CMD_MODE_CFG_OFFSET, 0x1, 1, 24);
+ /* disable High Speed clock generation */
+ /* TODO: can this be deleted? */
+ set_reg(mipi_dsi_base + MIPIDSI_LPCLK_CTRL_OFFSET, 0x0, 1, 0);
+
+ return 0;
+}
+
+static int mipi_dsi_on_sub2(struct dw_dsi *dsi, char __iomem *mipi_dsi_base)
+{
+ WARN_ON(!mipi_dsi_base);
+
+ /* switch to video mode */
+ set_reg(mipi_dsi_base + MIPIDSI_MODE_CFG_OFFSET, 0x0, 1, 0);
+
+ /* enable EOTP TX */
+ set_reg(mipi_dsi_base + MIPIDSI_PCKHDL_CFG_OFFSET, 0x1, 1, 0);
+
+ /* enable High Speed clock generation, continuous clock */
+ set_reg(mipi_dsi_base + MIPIDSI_LPCLK_CTRL_OFFSET, 0x1, 2, 0);
+
+ return 0;
+}
+
+static void dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ int ret;
+
+ if (dsi->enable)
+ return;
+
+ ret = clk_prepare_enable(ctx->dss_dphy0_ref_clk);
+ if (ret) {
+ DRM_ERROR("fail to enable dss_dphy0_ref_clk: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_dphy0_cfg_clk);
+ if (ret) {
+ DRM_ERROR("fail to enable dss_dphy0_cfg_clk: %d\n", ret);
+ clk_disable_unprepare(ctx->dss_dphy0_ref_clk);
+ return;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_pclk_dsi0_clk);
+ if (ret) {
+ DRM_ERROR("fail to enable dss_pclk_dsi0_clk: %d\n", ret);
+ clk_disable_unprepare(ctx->dss_dphy0_cfg_clk);
+ clk_disable_unprepare(ctx->dss_dphy0_ref_clk);
+ return;
+ }
+
+ mipi_dsi_on_sub1(dsi, ctx->base);
+
+ mipi_dsi_on_sub2(dsi, ctx->base);
+
+ /* turn on panel */
+ if (dsi->panel && drm_panel_prepare(dsi->panel))
+ DRM_ERROR("failed to prepare panel\n");
+
+ /*dw_dsi_set_mode(dsi, DSI_VIDEO_MODE);*/
+
+ /* turn on panel's backlight */
+ if (dsi->panel && drm_panel_enable(dsi->panel))
+ DRM_ERROR("failed to enable panel\n");
+
+ dsi->enable = true;
+}
+
+static void dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+
+ drm_mode_copy(&dsi->cur_mode, adj_mode);
+}
+
+static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ /* do nothing */
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
+ .atomic_check = dsi_encoder_atomic_check,
+ .mode_set = dsi_encoder_mode_set,
+ .enable = dsi_encoder_enable,
+ .disable = dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs dw_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int dw_drm_encoder_init(struct device *dev,
+ struct drm_device *drm_dev,
+ struct drm_encoder *encoder)
+{
+ int ret;
+ u32 crtc_mask = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+
+ if (!crtc_mask) {
+ DRM_ERROR("failed to find crtc mask\n");
+ return -EINVAL;
+ }
+
+ encoder->possible_crtcs = crtc_mask;
+ ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init dsi encoder\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_encoder_helper_funcs);
+
+ return 0;
+}
+
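+/*
+ * Record the link parameters handed over by the attaching peripheral; a
+ * virtual channel >= 1 is treated as the panel path, channel 0 as the
+ * HDMI bridge path.
+ */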
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ struct dw_dsi *dsi = host_to_dsi(host);
+ u32 id = mdsi->channel >= 1 ? OUT_PANEL : OUT_HDMI;
+
+ if (mdsi->lanes < 1 || mdsi->lanes > 4) {
+ DRM_ERROR("dsi device params invalid\n");
+ return -EINVAL;
+ }
+
+ dsi->client[id].lanes = mdsi->lanes;
+ dsi->client[id].format = mdsi->format;
+ dsi->client[id].mode_flags = mdsi->mode_flags;
+ dsi->client[id].phy_clock = 0;
+
+ DRM_INFO("host attach, client name=[%s], id=%d\n", mdsi->name, id);
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ /* do nothing */
+ return 0;
+}
+
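+/*
+ * Push one generic packet header word: wait for space in the command FIFO,
+ * write the header, then wait for the command and write-payload FIFOs to
+ * drain before returning.
+ */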
+static int dsi_gen_pkt_hdr_write(void __iomem *base, u32 val)
+{
+ u32 status;
+ int ret;
+
+ ret = readx_poll_timeout(readl, base + CMD_PKT_STATUS, status,
+ !(status & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("failed to get available command FIFO\n");
+ return ret;
+ }
+
+ writel(val, base + GEN_HDR);
+
+ ret = readx_poll_timeout(readl, base + CMD_PKT_STATUS, status,
+ status & (GEN_CMD_EMPTY | GEN_PLD_W_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_dcs_short_write(void __iomem *base,
+ const struct mipi_dsi_msg *msg)
+{
+ const u16 *tx_buf = msg->tx_buf;
+ u32 val;
+
+ if (msg->tx_len > 2) {
+ DRM_ERROR("too long tx buf length %zu for short write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
+ val = GEN_HDATA(*tx_buf) | GEN_HTYPE(msg->type);
+
+ return dsi_gen_pkt_hdr_write(base, val);
+}
+
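+/*
+ * Long writes stream the payload into GEN_PLD_DATA as 32-bit words (the
+ * final partial word is zero-padded), then push the packet header once the
+ * whole payload has been queued.
+ */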
+static int dsi_dcs_long_write(void __iomem *base,
+ const struct mipi_dsi_msg *msg)
+{
+ const u32 *tx_buf = msg->tx_buf;
+ int len = msg->tx_len, pld_data_bytes = sizeof(*tx_buf), ret;
+ u32 val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
+ u32 remainder = 0;
+ u32 status;
+
+ if (msg->tx_len < 3) {
+ DRM_ERROR("wrong tx buf length %zu for long write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
+ while (DIV_ROUND_UP(len, pld_data_bytes)) {
+ if (len < pld_data_bytes) {
+ memcpy(&remainder, tx_buf, len);
+ writel(remainder, base + GEN_PLD_DATA);
+ len = 0;
+ } else {
+ writel(*tx_buf, base + GEN_PLD_DATA);
+ tx_buf++;
+ len -= pld_data_bytes;
+ }
+
+ ret = readx_poll_timeout(readl, base + CMD_PKT_STATUS,
+ status, !(status & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ return dsi_gen_pkt_hdr_write(base, val);
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_dsi *dsi = host_to_dsi(host);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ void __iomem *base = ctx->base;
+ int ret;
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ ret = dsi_dcs_short_write(base, msg);
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ ret = dsi_dcs_long_write(base, msg);
+ break;
+ default:
+ DRM_ERROR("unsupported message type\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
+{
+ struct mipi_dsi_host *host = &dsi->host;
+ struct mipi_panel_info *mipi = &dsi->mipi;
+ int ret;
+
+ host->dev = dev;
+ host->ops = &dsi_host_ops;
+
+ mipi->max_tx_esc_clk = 10 * 1000000UL;
+ mipi->vc = 0;
+ mipi->color_mode = DSI_24BITS_1;
+ mipi->clk_post_adjust = 120;
+ mipi->clk_pre_adjust = 0;
+ mipi->clk_t_hs_prepare_adjust = 0;
+ mipi->clk_t_lpx_adjust = 0;
+ mipi->clk_t_hs_trial_adjust = 0;
+ mipi->clk_t_hs_exit_adjust = 0;
+ mipi->clk_t_hs_zero_adjust = 0;
+
+ dsi->ldi.data_en_plr = 0;
+ dsi->ldi.vsync_plr = 0;
+ dsi->ldi.hsync_plr = 0;
+
+ ret = mipi_dsi_host_register(host);
+ if (ret) {
+ DRM_ERROR("failed to register dsi host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_bridge *bridge = dsi->bridge;
+ int ret;
+
+ /* associate the bridge to dsi encoder */
+ bridge->encoder = encoder;
+
+ ret = drm_bridge_attach(dev, bridge);
+ if (ret) {
+ DRM_ERROR("failed to attach external bridge\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_connector_get_modes(struct drm_connector *connector)
+{
+ struct dw_dsi *dsi = connector_to_dsi(connector);
+
+ return drm_panel_get_modes(dsi->panel);
+}
+
+static enum drm_mode_status
+dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status mode_status = MODE_OK;
+
+ return mode_status;
+}
+
+static struct drm_encoder *
+dsi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct dw_dsi *dsi = connector_to_dsi(connector);
+
+ return &dsi->encoder;
+}
+
+static struct drm_connector_helper_funcs dsi_connector_helper_funcs = {
+ .get_modes = dsi_connector_get_modes,
+ .mode_valid = dsi_connector_mode_valid,
+ .best_encoder = dsi_connector_best_encoder,
+};
+
+static enum drm_connector_status
+dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct dw_dsi *dsi = connector_to_dsi(connector);
+ enum drm_connector_status status;
+
+ status = dsi->cur_client == OUT_PANEL ? connector_status_connected :
+ connector_status_disconnected;
+
+ return status;
+}
+
+static void dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs dsi_atomic_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = dsi_connector_detect,
+ .destroy = dsi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dsi_connector_init(struct drm_device *dev, struct dw_dsi *dsi)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ drm_connector_helper_add(connector,
+ &dsi_connector_helper_funcs);
+
+ ret = drm_connector_init(dev, &dsi->connector,
+ &dsi_atomic_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret)
+ return ret;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ ret = drm_panel_attach(dsi->panel, connector);
+ if (ret)
+ return ret;
+
+ DRM_INFO("connector init\n");
+ return 0;
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dsi_data *ddata = dev_get_drvdata(dev);
+ struct dw_dsi *dsi = &ddata->dsi;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = dw_drm_encoder_init(dev, drm_dev, &dsi->encoder);
+ if (ret)
+ return ret;
+
+ if (dsi->bridge) {
+ ret = dsi_bridge_init(drm_dev, dsi);
+ if (ret)
+ return ret;
+ }
+
+ if (dsi->panel) {
+ ret = dsi_connector_init(drm_dev, dsi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_parse_bridge_endpoint(struct dw_dsi *dsi,
+ struct device_node *endpoint)
+{
+ struct device_node *bridge_node;
+ struct drm_bridge *bridge;
+
+ bridge_node = of_graph_get_remote_port_parent(endpoint);
+ if (!bridge_node) {
+ DRM_ERROR("no valid bridge node\n");
+ return -ENODEV;
+ }
+ of_node_put(bridge_node);
+
+ bridge = of_drm_find_bridge(bridge_node);
+ if (!bridge) {
+ DRM_INFO("wait for external HDMI bridge driver.\n");
+ return -EPROBE_DEFER;
+ }
+ dsi->bridge = bridge;
+
+ return 0;
+}
+
+static int dsi_parse_panel_endpoint(struct dw_dsi *dsi,
+ struct device_node *endpoint)
+{
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (!panel_node) {
+ DRM_ERROR("no valid panel node\n");
+ return -ENODEV;
+ }
+ of_node_put(panel_node);
+
+ panel = of_drm_find_panel(panel_node);
+ if (!panel) {
+ DRM_DEBUG_DRIVER("skip this panel endpoint.\n");
+ return 0;
+ }
+ dsi->panel = panel;
+
+ return 0;
+}
+
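+/*
+ * Walk the OF graph of the DSI node: port 0 is the input from the display
+ * controller and is skipped; on the output side, endpoint 0 is expected to
+ * describe the HDMI bridge and any later endpoint a DSI panel.
+ */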
+static int dsi_parse_endpoint(struct dw_dsi *dsi,
+ struct device_node *np,
+ enum dsi_output_client client)
+{
+ struct device_node *ep_node;
+ struct of_endpoint ep;
+ int ret = 0;
+
+ if (client == OUT_MAX)
+ return -EINVAL;
+
+ for_each_endpoint_of_node(np, ep_node) {
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /* skip dsi input port, port == 0 is input port */
+ if (ep.port == 0)
+ continue;
+
+ /* parse bridge endpoint */
+ if (client == OUT_HDMI) {
+ if (ep.id == 0) {
+ ret = dsi_parse_bridge_endpoint(dsi, ep_node);
+ if (dsi->bridge)
+ break;
+ }
+ } else { /* parse panel endpoint */
+ if (ep.id > 0) {
+ ret = dsi_parse_panel_endpoint(dsi, ep_node);
+ if (dsi->panel)
+ break;
+ }
+ }
+
+ if (ret) {
+ of_node_put(ep_node);
+ return ret;
+ }
+ }
+
+ if (!dsi->bridge && !dsi->panel) {
+ DRM_ERROR("at least one bridge or panel node is required\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
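+/*
+ * Map the DSI controller and peripheral CRG register windows, claim the
+ * output mux GPIO, de-assert the DSI resets and look up the three DSS
+ * clocks this driver manages.
+ */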
+static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ int ret = 0;
+ struct device_node *np = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, DTS_COMP_DSI_NAME);
+ if (!np) {
+ DRM_ERROR("NOT FOUND device node %s!\n",
+ DTS_COMP_DSI_NAME);
+ return -ENXIO;
+ }
+
+ ctx->base = of_iomap(np, 0);
+ if (!(ctx->base)) {
+ DRM_ERROR ("failed to get base resource.\n");
+ return -ENXIO;
+ }
+
+ ctx->peri_crg_base = of_iomap(np, 1);
+ if (!(ctx->peri_crg_base)) {
+ DRM_ERROR ("failed to get peri_crg_base resource.\n");
+ return -ENXIO;
+ }
+
+ dsi->gpio_mux = devm_gpiod_get(&pdev->dev, "mux", GPIOD_OUT_HIGH);
+ if (IS_ERR(dsi->gpio_mux))
+ return PTR_ERR(dsi->gpio_mux);
+ /* set dsi default output to panel */
+ dsi->cur_client = OUT_PANEL;
+
+ /*dis-reset*/
+ /*ip_reset_dis_dsi0, ip_reset_dis_dsi1*/
+ outp32(ctx->peri_crg_base + PERRSTDIS3, 0x30000000);
+
+ ctx->dss_dphy0_ref_clk = devm_clk_get(&pdev->dev, "clk_txdphy0_ref");
+ if (IS_ERR(ctx->dss_dphy0_ref_clk)) {
+ DRM_ERROR("failed to get dss_dphy0_ref_clk clock\n");
+ return PTR_ERR(ctx->dss_dphy0_ref_clk);
+ }
+
+ ret = clk_set_rate(ctx->dss_dphy0_ref_clk, DEFAULT_MIPI_CLK_RATE);
+ if (ret < 0) {
+ DRM_ERROR("dss_dphy0_ref_clk clk_set_rate(%lu) failed, error=%d!\n",
+ DEFAULT_MIPI_CLK_RATE, ret);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("dss_dphy0_ref_clk:[%lu]->[%lu].\n",
+ DEFAULT_MIPI_CLK_RATE, clk_get_rate(ctx->dss_dphy0_ref_clk));
+
+ ctx->dss_dphy0_cfg_clk = devm_clk_get(&pdev->dev, "clk_txdphy0_cfg");
+ if (IS_ERR(ctx->dss_dphy0_cfg_clk)) {
+ DRM_ERROR("failed to get dss_dphy0_cfg_clk clock\n");
+ return PTR_ERR(ctx->dss_dphy0_cfg_clk);
+ }
+
+ ret = clk_set_rate(ctx->dss_dphy0_cfg_clk, DEFAULT_MIPI_CLK_RATE);
+ if (ret < 0) {
+ DRM_ERROR("dss_dphy0_cfg_clk clk_set_rate(%lu) failed, error=%d!\n",
+ DEFAULT_MIPI_CLK_RATE, ret);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("dss_dphy0_cfg_clk:[%lu]->[%lu].\n",
+ DEFAULT_MIPI_CLK_RATE, clk_get_rate(ctx->dss_dphy0_cfg_clk));
+
+ ctx->dss_pclk_dsi0_clk = devm_clk_get(&pdev->dev, "pclk_dsi0");
+ if (IS_ERR(ctx->dss_pclk_dsi0_clk)) {
+ DRM_ERROR("failed to get dss_pclk_dsi0_clk clock\n");
+ return PTR_ERR(ctx->dss_pclk_dsi0_clk);
+ }
+
+ return 0;
+}
+
+static int dsi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dsi_data *data;
+ struct dw_dsi *dsi;
+ struct dsi_hw_ctx *ctx;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ DRM_ERROR("failed to allocate dsi data.\n");
+ return -ENOMEM;
+ }
+ dsi = &data->dsi;
+ ctx = &data->ctx;
+ dsi->ctx = ctx;
+
+ /* parse HDMI bridge endpoint */
+ ret = dsi_parse_endpoint(dsi, np, OUT_HDMI);
+ if (ret)
+ return ret;
+
+ ret = dsi_host_init(dev, dsi);
+ if (ret)
+ return ret;
+
+ /* parse panel endpoint */
+ ret = dsi_parse_endpoint(dsi, np, OUT_PANEL);
+ if (ret)
+ goto err_host_unregister;
+
+ ret = dsi_parse_dt(pdev, dsi);
+ if (ret)
+ goto err_host_unregister;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = component_add(dev, &dsi_ops);
+ if (ret)
+ goto err_host_unregister;
+
+ return 0;
+
+err_host_unregister:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
+}
+
+static int dsi_remove(struct platform_device *pdev)
+{
+ struct dsi_data *data = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &dsi_ops);
+ mipi_dsi_host_unregister(&data->dsi.host);
+
+ return 0;
+}
+
+static const struct of_device_id dsi_of_match[] = {
+ {.compatible = "hisilicon,hi3660-dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_probe,
+ .remove = dsi_remove,
+ .driver = {
+ .name = "dw-dsi",
+ .of_match_table = dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_DESCRIPTION("DesignWare MIPI DSI Host Controller v1.02 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin960/dw_dsi_reg.h b/drivers/gpu/drm/hisilicon/kirin960/dw_dsi_reg.h
new file mode 100644
index 000000000000..00fac1f35265
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/dw_dsi_reg.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DW_DSI_REG_H__
+#define __DW_DSI_REG_H__
+
+#define MASK(x) (BIT(x) - 1)
+#define DEFAULT_MAX_TX_ESC_CLK (10 * 1000000UL)
+/*
+ * regs
+ */
+#define PWR_UP 0x04 /* Core power-up */
+#define RESET 0
+#define POWERUP BIT(0)
+#define PHY_IF_CFG 0xA4 /* D-PHY interface configuration */
+#define CLKMGR_CFG 0x08 /* the internal clock dividers */
+#define PHY_RSTZ 0xA0 /* D-PHY reset control */
+#define PHY_ENABLECLK BIT(2)
+#define PHY_UNRSTZ BIT(1)
+#define PHY_UNSHUTDOWNZ BIT(0)
+#define PHY_TST_CTRL0 0xB4 /* D-PHY test interface control 0 */
+#define PHY_TST_CTRL1 0xB8 /* D-PHY test interface control 1 */
+#define CLK_TLPX 0x10
+#define CLK_THS_PREPARE 0x11
+#define CLK_THS_ZERO 0x12
+#define CLK_THS_TRAIL 0x13
+#define CLK_TWAKEUP 0x14
+#define DATA_TLPX(x) (0x20 + ((x) << 4))
+#define DATA_THS_PREPARE(x) (0x21 + ((x) << 4))
+#define DATA_THS_ZERO(x) (0x22 + ((x) << 4))
+#define DATA_THS_TRAIL(x) (0x23 + ((x) << 4))
+#define DATA_TTA_GO(x) (0x24 + ((x) << 4))
+#define DATA_TTA_GET(x) (0x25 + ((x) << 4))
+#define DATA_TWAKEUP(x) (0x26 + ((x) << 4))
+#define PHY_CFG_I 0x60
+#define PHY_CFG_PLL_I 0x63
+#define PHY_CFG_PLL_II 0x64
+#define PHY_CFG_PLL_III 0x65
+#define PHY_CFG_PLL_IV 0x66
+#define PHY_CFG_PLL_V 0x67
+#define DPI_COLOR_CODING 0x10 /* DPI color coding */
+#define DPI_CFG_POL 0x14 /* DPI polarity configuration */
+#define VID_HSA_TIME 0x48 /* Horizontal Sync Active time */
+#define VID_HBP_TIME 0x4C /* Horizontal Back Porch time */
+#define VID_HLINE_TIME 0x50 /* Line time */
+#define VID_VSA_LINES 0x54 /* Vertical Sync Active period */
+#define VID_VBP_LINES 0x58 /* Vertical Back Porch period */
+#define VID_VFP_LINES 0x5C /* Vertical Front Porch period */
+#define VID_VACTIVE_LINES 0x60 /* Vertical resolution */
+#define VID_PKT_SIZE 0x3C /* Video packet size */
+#define VID_MODE_CFG 0x38 /* Video mode configuration */
+#define GEN_HDR 0x6c
+#define GEN_HDATA(data) (((data) & 0xffff) << 8)
+#define GEN_HDATA_MASK (0xffff << 8)
+#define GEN_HTYPE(type) (((type) & 0xff) << 0)
+#define GEN_HTYPE_MASK 0xff
+#define GEN_PLD_DATA 0x70
+#define CMD_PKT_STATUS 0x74
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_R_FULL BIT(5)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_MODE_CFG 0x68
+#define MAX_RD_PKT_SIZE_LP BIT(24)
+#define DCS_LW_TX_LP BIT(19)
+#define DCS_SR_0P_TX_LP BIT(18)
+#define DCS_SW_1P_TX_LP BIT(17)
+#define DCS_SW_0P_TX_LP BIT(16)
+#define GEN_LW_TX_LP BIT(14)
+#define GEN_SR_2P_TX_LP BIT(13)
+#define GEN_SR_1P_TX_LP BIT(12)
+#define GEN_SR_0P_TX_LP BIT(11)
+#define GEN_SW_2P_TX_LP BIT(10)
+#define GEN_SW_1P_TX_LP BIT(9)
+#define GEN_SW_0P_TX_LP BIT(8)
+#define EN_ACK_RQST BIT(1)
+#define EN_TEAR_FX BIT(0)
+#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
+ DCS_LW_TX_LP | \
+ DCS_SR_0P_TX_LP | \
+ DCS_SW_1P_TX_LP | \
+ DCS_SW_0P_TX_LP | \
+ GEN_LW_TX_LP | \
+ GEN_SR_2P_TX_LP | \
+ GEN_SR_1P_TX_LP | \
+ GEN_SR_0P_TX_LP | \
+ GEN_SW_2P_TX_LP | \
+ GEN_SW_1P_TX_LP | \
+ GEN_SW_0P_TX_LP)
+#define PHY_TMR_CFG 0x9C /* Data lanes timing configuration */
+#define BTA_TO_CNT 0x8C /* Response timeout definition */
+#define PHY_TMR_LPCLK_CFG 0x98 /* clock lane timing configuration */
+#define CLK_DATA_TMR_CFG 0xCC
+#define LPCLK_CTRL 0x94 /* Low-power in clock lane */
+#define PHY_TXREQUESTCLKHS BIT(0)
+#define MODE_CFG 0x34 /* Video or Command mode selection */
+#define PHY_STATUS 0xB0 /* D-PHY PPI status interface */
+
+#define PHY_STOP_WAIT_TIME 0x30
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+/*
+ * regs relevant enum
+ */
+enum dpi_color_coding {
+ DSI_24BITS_1 = 5,
+};
+
+enum dsi_video_mode_type {
+ DSI_NON_BURST_SYNC_PULSES = 0,
+ DSI_NON_BURST_SYNC_EVENTS,
+ DSI_BURST_SYNC_PULSES_1,
+ DSI_BURST_SYNC_PULSES_2
+};
+
+enum dsi_work_mode {
+ DSI_VIDEO_MODE = 0,
+ DSI_COMMAND_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
+static inline void dw_update_bits(void __iomem *addr, u32 bit_start,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(addr);
+ tmp = orig & ~(mask << bit_start);
+ tmp |= (val & mask) << bit_start;
+ writel(tmp, addr);
+}
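+
+/*
+ * Usage sketch (illustrative only, not part of the original patch; `base' is
+ * a hypothetical mapped controller base): program the 2-bit field starting at
+ * bit 0 of VID_MODE_CFG without disturbing the other bits of the register:
+ *
+ *	dw_update_bits(base + VID_MODE_CFG, 0, MASK(2), DSI_BURST_SYNC_PULSES_1);
+ */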
+
+#endif /* __DW_DSI_REG_H__ */
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_dpe_reg.h b/drivers/gpu/drm/hisilicon/kirin960/kirin_dpe_reg.h
new file mode 100644
index 000000000000..adaa71f6dcd5
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_dpe_reg.h
@@ -0,0 +1,3115 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_DPE_REG_H__
+#define __KIRIN_DPE_REG_H__
+
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/bug.h>
+#include <linux/iommu.h>
+
+#include <linux/ion.h>
+#include <linux/hisi/hisi_ion.h>
+
+/*******************************************************************************
+**
+*/
+enum dss_chn_idx {
+ DSS_RCHN_NONE = -1,
+ DSS_RCHN_D2 = 0,
+ DSS_RCHN_D3,
+ DSS_RCHN_V0,
+ DSS_RCHN_G0,
+ DSS_RCHN_V1,
+ DSS_RCHN_G1,
+ DSS_RCHN_D0,
+ DSS_RCHN_D1,
+
+ DSS_WCHN_W0,
+ DSS_WCHN_W1,
+
+ DSS_CHN_MAX,
+
+ DSS_RCHN_V2 = DSS_CHN_MAX, /* for copybit, only supported in chicago */
+ DSS_WCHN_W2,
+
+ DSS_COPYBIT_MAX,
+};
+
+enum dss_channel {
+ DSS_CH1 = 0, /* channel 1 for primary plane */
+ DSS_CH_NUM
+};
+
+#define PRIMARY_CH DSS_CH1 /* primary plane */
+
+typedef struct dss_rect {
+ s32 x;
+ s32 y;
+ s32 w;
+ s32 h;
+} dss_rect_t;
+
+typedef struct dss_rect_ltrb {
+ s32 left;
+ s32 top;
+ s32 right;
+ s32 bottom;
+} dss_rect_ltrb_t;
+
+enum {
+ DSI_1_LANES = 0,
+ DSI_2_LANES,
+ DSI_3_LANES,
+ DSI_4_LANES,
+};
+
+enum dss_ovl_idx {
+ DSS_OVL0 = 0,
+ DSS_OVL1,
+ DSS_OVL2,
+ DSS_OVL3,
+ DSS_OVL_IDX_MAX,
+};
+
+#define DSS_WCH_MAX (2)
+
+typedef struct dss_img {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bpp; /* bytes per pixel */
+ uint32_t buf_size;
+ uint32_t stride;
+ uint32_t stride_plane1;
+ uint32_t stride_plane2;
+ uint64_t phy_addr;
+ uint64_t vir_addr;
+ uint32_t offset_plane1;
+ uint32_t offset_plane2;
+
+ uint64_t afbc_header_addr;
+ uint64_t afbc_payload_addr;
+ uint32_t afbc_header_stride;
+ uint32_t afbc_payload_stride;
+ uint32_t afbc_scramble_mode;
+ uint32_t mmbuf_base;
+ uint32_t mmbuf_size;
+
+ uint32_t mmu_enable;
+ uint32_t csc_mode;
+ uint32_t secure_mode;
+ int32_t shared_fd;
+ uint32_t reserved0;
+} dss_img_t;
+
+typedef struct drm_dss_layer {
+ dss_img_t img;
+ dss_rect_t src_rect;
+ dss_rect_t src_rect_mask;
+ dss_rect_t dst_rect;
+ uint32_t transform;
+ int32_t blending;
+ uint32_t glb_alpha;
+ uint32_t color; /* background color or dim color */
+ int32_t layer_idx;
+ int32_t chn_idx;
+ uint32_t need_cap;
+ int32_t acquire_fence;
+} drm_dss_layer_t;
+
+
+/*******************************************************************************
+**
+*/
+#define DEFAULT_MIPI_CLK_RATE (192 * 100000L)
+#define DEFAULT_PCLK_DSI_RATE (120 * 1000000L)
+
+#define DEFAULT_DSS_CORE_CLK_08V_RATE (535000000UL)
+#define DEFAULT_DSS_CORE_CLK_07V_RATE (400000000UL)
+#define DEFAULT_PCLK_DSS_RATE (114000000UL)
+#define DEFAULT_PCLK_PCTRL_RATE (80000000UL)
+#define DSS_MAX_PXL0_CLK_288M (288000000UL)
+#define DSS_MAX_PXL0_CLK_144M (144000000UL)
+
+#define DSS_ADDR 0xE8600000
+#define DSS_DSI_ADDR (DSS_ADDR + 0x01000)
+#define DSS_LDI_ADDR (DSS_ADDR + 0x7d000)
+#define PMC_BASE (0xFFF31000)
+#define PERI_CRG_BASE (0xFFF35000)
+#define SCTRL_BASE (0xFFF0A000)
+
+#define GPIO_LCD_POWER_1V2 (54)
+#define GPIO_LCD_STANDBY (67)
+#define GPIO_LCD_RESETN (65)
+#define GPIO_LCD_GATING (60)
+#define GPIO_LCD_PCLK_GATING (58)
+#define GPIO_LCD_REFCLK_GATING (59)
+#define GPIO_LCD_SPICS (168)
+#define GPIO_LCD_DRV_EN (73)
+
+#define GPIO_PG_SEL_A (72)
+#define GPIO_TX_RX_A (74)
+#define GPIO_PG_SEL_B (76)
+#define GPIO_TX_RX_B (78)
+
+/*******************************************************************************
+ **
+ */
+#define CRGPERI_PLL0_CLK_RATE (1600000000UL)
+#define CRGPERI_PLL2_CLK_RATE (960000000UL)
+#define CRGPERI_PLL3_CLK_RATE (1600000000UL)
+
+#define MMBUF_SIZE_MAX (288 * 1024)
+#define HISI_DSS_CMDLIST_MAX (16)
+#define HISI_DSS_CMDLIST_IDXS_MAX (0xFFFF)
+#define HISI_DSS_COPYBIT_CMDLIST_IDXS (0xC000)
+#define HISI_DSS_DPP_MAX_SUPPORT_BIT (0x7ff)
+#define HISIFB_DSS_PLATFORM_TYPE (FB_ACCEL_HI366x | FB_ACCEL_PLATFORM_TYPE_ASIC)
+
+#define DSS_MIF_SMMU_SMRX_IDX_STEP (16)
+#define CRG_PERI_DIS3_DEFAULT_VAL (0x0002F000)
+#define SCF_LINE_BUF (2560)
+#define DSS_GLB_MODULE_CLK_SEL_DEFAULT_VAL (0xF0000008)
+#define DSS_LDI_CLK_SEL_DEFAULT_VAL (0x00000004)
+#define DSS_DBUF_MEM_CTRL_DEFAULT_VAL (0x00000008)
+#define DSS_SMMU_RLD_EN0_DEFAULT_VAL (0xffffffff)
+#define DSS_SMMU_RLD_EN1_DEFAULT_VAL (0xffffff8f)
+#define DSS_SMMU_OUTSTANDING_VAL (0xf)
+#define DSS_MIF_CTRL2_INVAL_SEL3_STRIDE_MASK (0xc)
+#define DSS_AFBCE_ENC_OS_CFG_DEFAULT_VAL (0x7)
+#define TUI_SEC_RCH (DSS_RCHN_V0)
+#define DSS_CHN_MAX_DEFINE (DSS_COPYBIT_MAX)
+
+/* perf stat */
+#define DSS_DEVMEM_PERF_BASE (0xFDF10000)
+#define CRG_PERIPH_APB_PERRSTSTAT0_REG (0x68)
+#define CRG_PERIPH_APB_IP_RST_PERF_STAT_BIT (18)
+#define PERF_SAMPSTOP_REG (0x10)
+#define DEVMEM_PERF_SIZE (0x100)
+
+/*
+ * DSS Registers
+ */
+
+/* MACROS */
+#define DSS_WIDTH(width) ((width) - 1)
+#define DSS_HEIGHT(height) ((height) - 1)
+
+#define RES_540P (960 * 540)
+#define RES_720P (1280 * 720)
+#define RES_1080P (1920 * 1080)
+#define RES_1200P (1920 * 1200)
+#define RES_1440P (2560 * 1440)
+#define RES_1600P (2560 * 1600)
+#define RES_4K_PHONE (3840 * 2160)
+#define RES_4K_PAD (3840 * 2400)
+
+#define DFC_MAX_CLIP_NUM (31)
+
+/* for DFS */
+/* 1480 * 144bits */
+#define DFS_TIME (80)
+#define DFS_TIME_MIN (50)
+#define DFS_TIME_MIN_4K (10)
+#define DBUF0_DEPTH (1408)
+#define DBUF1_DEPTH (512)
+#define DBUF_WIDTH_BIT (144)
+
+#define GET_THD_RQOS_IN(max_depth) ((max_depth) * 10 / 100)
+#define GET_THD_RQOS_OUT(max_depth) ((max_depth) * 30 / 100)
+#define GET_THD_WQOS_IN(max_depth) ((max_depth) * 95 / 100)
+#define GET_THD_WQOS_OUT(max_depth) ((max_depth) * 70 / 100)
+#define GET_THD_CG_IN(max_depth) ((max_depth) - 1)
+#define GET_THD_CG_OUT(max_depth) ((max_depth) * 70 / 100)
+#define GET_FLUX_REQ_IN(max_depth) ((max_depth) * 50 / 100)
+#define GET_FLUX_REQ_OUT(max_depth) ((max_depth) * 90 / 100)
+#define GET_THD_OTHER_DFS_CG_HOLD(max_depth) (0x20)
+#define GET_THD_OTHER_WR_WAIT(max_depth) ((max_depth) * 90 / 100)
+
+#define GET_RDMA_ROT_HQOS_ASSERT_LEV(max_depth) ((max_depth) * 30 / 100)
+#define GET_RDMA_ROT_HQOS_REMOVE_LEV(max_depth) ((max_depth) * 60 / 100)
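+
+/*
+ * Illustrative evaluation (not from the original patch): with DBUF0_DEPTH
+ * of 1408 entries, GET_THD_RQOS_IN(1408) = 140 and
+ * GET_THD_RQOS_OUT(1408) = 422, i.e. the read-QoS thresholds sit at
+ * roughly 10% and 30% of the buffer depth.
+ */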
+
+enum lcd_orientation {
+ LCD_LANDSCAPE = 0,
+ LCD_PORTRAIT,
+};
+
+enum lcd_format {
+ LCD_RGB888 = 0,
+ LCD_RGB101010,
+ LCD_RGB565,
+};
+
+enum lcd_rgb_order {
+ LCD_RGB = 0,
+ LCD_BGR,
+};
+
+enum dss_addr {
+ DSS_ADDR_PLANE0 = 0,
+ DSS_ADDR_PLANE1,
+ DSS_ADDR_PLANE2,
+};
+
+enum dss_transform {
+ DSS_TRANSFORM_NOP = 0x0,
+ DSS_TRANSFORM_FLIP_H = 0x01,
+ DSS_TRANSFORM_FLIP_V = 0x02,
+ DSS_TRANSFORM_ROT = 0x04,
+};
+
+enum dss_dfc_format {
+ DFC_PIXEL_FORMAT_RGB_565 = 0,
+ DFC_PIXEL_FORMAT_XRGB_4444,
+ DFC_PIXEL_FORMAT_ARGB_4444,
+ DFC_PIXEL_FORMAT_XRGB_5551,
+ DFC_PIXEL_FORMAT_ARGB_5551,
+ DFC_PIXEL_FORMAT_XRGB_8888,
+ DFC_PIXEL_FORMAT_ARGB_8888,
+ DFC_PIXEL_FORMAT_BGR_565,
+ DFC_PIXEL_FORMAT_XBGR_4444,
+ DFC_PIXEL_FORMAT_ABGR_4444,
+ DFC_PIXEL_FORMAT_XBGR_5551,
+ DFC_PIXEL_FORMAT_ABGR_5551,
+ DFC_PIXEL_FORMAT_XBGR_8888,
+ DFC_PIXEL_FORMAT_ABGR_8888,
+
+ DFC_PIXEL_FORMAT_YUV444,
+ DFC_PIXEL_FORMAT_YVU444,
+ DFC_PIXEL_FORMAT_YUYV422,
+ DFC_PIXEL_FORMAT_YVYU422,
+ DFC_PIXEL_FORMAT_VYUY422,
+ DFC_PIXEL_FORMAT_UYVY422,
+};
+
+enum dss_dma_format {
+ DMA_PIXEL_FORMAT_RGB_565 = 0,
+ DMA_PIXEL_FORMAT_ARGB_4444,
+ DMA_PIXEL_FORMAT_XRGB_4444,
+ DMA_PIXEL_FORMAT_ARGB_5551,
+ DMA_PIXEL_FORMAT_XRGB_5551,
+ DMA_PIXEL_FORMAT_ARGB_8888,
+ DMA_PIXEL_FORMAT_XRGB_8888,
+
+ DMA_PIXEL_FORMAT_RESERVED0,
+
+ DMA_PIXEL_FORMAT_YUYV_422_Pkg,
+ DMA_PIXEL_FORMAT_YUV_420_SP_HP,
+ DMA_PIXEL_FORMAT_YUV_420_P_HP,
+ DMA_PIXEL_FORMAT_YUV_422_SP_HP,
+ DMA_PIXEL_FORMAT_YUV_422_P_HP,
+ DMA_PIXEL_FORMAT_AYUV_4444,
+};
+
+enum dss_buf_format {
+ DSS_BUF_LINEAR = 0,
+ DSS_BUF_TILE,
+};
+
+enum dss_blend_mode {
+ DSS_BLEND_CLEAR = 0,
+ DSS_BLEND_SRC,
+ DSS_BLEND_DST,
+ DSS_BLEND_SRC_OVER_DST,
+ DSS_BLEND_DST_OVER_SRC,
+ DSS_BLEND_SRC_IN_DST,
+ DSS_BLEND_DST_IN_SRC,
+ DSS_BLEND_SRC_OUT_DST,
+ DSS_BLEND_DST_OUT_SRC,
+ DSS_BLEND_SRC_ATOP_DST,
+ DSS_BLEND_DST_ATOP_SRC,
+ DSS_BLEND_SRC_XOR_DST,
+ DSS_BLEND_SRC_ADD_DST,
+ DSS_BLEND_FIX_OVER,
+ DSS_BLEND_FIX_PER0,
+ DSS_BLEND_FIX_PER1,
+ DSS_BLEND_FIX_PER2,
+ DSS_BLEND_FIX_PER3,
+ DSS_BLEND_FIX_PER4,
+ DSS_BLEND_FIX_PER5,
+ DSS_BLEND_FIX_PER6,
+ DSS_BLEND_FIX_PER7,
+ DSS_BLEND_FIX_PER8,
+ DSS_BLEND_FIX_PER9,
+ DSS_BLEND_FIX_PER10,
+ DSS_BLEND_FIX_PER11,
+ DSS_BLEND_FIX_PER12,
+ DSS_BLEND_FIX_PER13,
+ DSS_BLEND_FIX_PER14,
+ DSS_BLEND_FIX_PER15,
+ DSS_BLEND_FIX_PER16,
+ DSS_BLEND_FIX_PER17,
+
+ DSS_BLEND_MAX,
+};
+
+enum dss_chn_module {
+ MODULE_MIF_CHN,
+ MODULE_AIF0_CHN,
+ MODULE_AIF1_CHN,
+ MODULE_MCTL_CHN_MUTEX,
+ MODULE_MCTL_CHN_FLUSH_EN,
+ MODULE_MCTL_CHN_OV_OEN,
+ MODULE_MCTL_CHN_STARTY,
+ MODULE_MCTL_CHN_MOD_DBG,
+ MODULE_DMA,
+ MODULE_DFC,
+ MODULE_SCL,
+ MODULE_SCL_LUT,
+ MODULE_ARSR2P,
+ MODULE_ARSR2P_LUT,
+ MODULE_POST_CLIP,
+ MODULE_PCSC,
+ MODULE_CSC,
+ MODULE_CHN_MAX,
+};
+
+enum dss_chn_cap {
+ MODULE_CAP_ROT,
+ MODULE_CAP_SCL,
+ MODULE_CAP_CSC,
+ MODULE_CAP_SHARPNESS_1D,
+ MODULE_CAP_SHARPNESS_2D,
+ MODULE_CAP_CE,
+ MODULE_CAP_AFBCD,
+ MODULE_CAP_AFBCE,
+ MODULE_CAP_YUV_PLANAR,
+ MODULE_CAP_YUV_SEMI_PLANAR,
+ MODULE_CAP_YUV_PACKAGE,
+ MODULE_CAP_MAX,
+};
+
+enum dss_ovl_module {
+ MODULE_OVL_BASE,
+ MODULE_MCTL_BASE,
+ MODULE_OVL_MAX,
+};
+
+enum dss_axi_idx {
+ AXI_CHN0 = 0,
+ AXI_CHN1,
+ AXI_CHN_MAX,
+};
+
+#define AXI0_MAX_DSS_CHN_THRESHOLD (3)
+#define AXI1_MAX_DSS_CHN_THRESHOLD (3)
+
+#define DEFAULT_AXI_CLK_RATE0 (120 * 1000000)
+#define DEFAULT_AXI_CLK_RATE1 (240 * 1000000)
+#define DEFAULT_AXI_CLK_RATE2 (360 * 1000000)
+#define DEFAULT_AXI_CLK_RATE3 (480 * 1000000)
+#define DEFAULT_AXI_CLK_RATE4 (667 * 1000000)
+#define DEFAULT_AXI_CLK_RATE5 (800 * 1000000)
+
+enum dss_rdma_idx {
+ DSS_RDMA0 = 0,
+ DSS_RDMA1,
+ DSS_RDMA2,
+ DSS_RDMA3,
+ DSS_RDMA4,
+ DSS_RDMA_MAX,
+};
+
+/*******************************************************************************
+ **
+ */
+
+#define PEREN0 (0x000)
+#define PERDIS0 (0x004)
+#define PEREN2 (0x020)
+#define PERDIS2 (0x024)
+#define PERCLKEN2 (0x028)
+#define PERSTAT2 (0x02C)
+#define PEREN3 (0x030)
+#define PERDIS3 (0x034)
+#define PERCLKEN3 (0x038)
+#define PERSTAT3 (0x03C)
+#define PEREN5 (0x050)
+#define PERDIS5 (0x054)
+#define PERCLKEN5 (0x058)
+#define PERSTAT5 (0x05C)
+#define PERRSTDIS0 (0x064)
+#define PERRSTEN2 (0x078)
+#define PERRSTDIS2 (0x07C)
+#define PERRSTEN3 (0x084)
+#define PERRSTDIS3 (0x088)
+#define PERRSTSTAT3 (0x08c)
+#define PERRSTEN4 (0x090)
+#define PERRSTDIS4 (0x094)
+#define PERRSTSTAT4 (0x098)
+#define CLKDIV3 (0x0B4)
+#define CLKDIV5 (0x0BC)
+#define CLKDIV10 (0x0D0)
+#define CLKDIV18 (0x0F0)
+#define CLKDIV20 (0x0F8)
+#define ISOEN (0x144)
+#define ISODIS (0x148)
+#define ISOSTAT (0x14c)
+#define PERPWREN (0x150)
+#define PERPWRDIS (0x154)
+#define PERPWRSTAT (0x158)
+#define PERI_AUTODIV8 (0x380)
+#define PERI_AUTODIV9 (0x384)
+#define PERI_AUTODIV10 (0x388)
+
+#define NOC_POWER_IDLEREQ (0x380)
+#define NOC_POWER_IDLEACK (0x384)
+#define NOC_POWER_IDLE (0x388)
+
+#define SCPWREN (0x0D0)
+#define SCPEREN1 (0x040)
+#define SCPERDIS1 (0x044)
+#define SCPERCLKEN1 (0x048)
+#define SCPERRSTDIS1 (0x090)
+#define SCISODIS (0x0C4)
+#define SCCLKDIV2 (0x258)
+#define SCCLKDIV4 (0x260)
+
+#define PERI_CTRL23 (0x060)
+#define PERI_CTRL29 (0x078)
+#define PERI_CTRL30 (0x07C)
+#define PERI_CTRL32 (0x084)
+#define PERI_STAT0 (0x094)
+#define PERI_STAT1 (0x098)
+#define PERI_STAT16 (0x0D4)
+
+#define PCTRL_DPHYTX_ULPSEXIT1 BIT(4)
+#define PCTRL_DPHYTX_ULPSEXIT0 BIT(3)
+
+#define PCTRL_DPHYTX_CTRL1 BIT(1)
+#define PCTRL_DPHYTX_CTRL0 BIT(0)
+
+/*******************************************************************************
+ **
+ */
+#define BIT_DSS_GLB_INTS BIT(30)
+#define BIT_MMU_IRPT_S BIT(29)
+#define BIT_MMU_IRPT_NS BIT(28)
+#define BIT_DBG_MCTL_INTS BIT(27)
+#define BIT_DBG_WCH1_INTS BIT(26)
+#define BIT_DBG_WCH0_INTS BIT(25)
+#define BIT_DBG_RCH7_INTS BIT(24)
+#define BIT_DBG_RCH6_INTS BIT(23)
+#define BIT_DBG_RCH5_INTS BIT(22)
+#define BIT_DBG_RCH4_INTS BIT(21)
+#define BIT_DBG_RCH3_INTS BIT(20)
+#define BIT_DBG_RCH2_INTS BIT(19)
+#define BIT_DBG_RCH1_INTS BIT(18)
+#define BIT_DBG_RCH0_INTS BIT(17)
+#define BIT_ITF0_INTS BIT(16)
+#define BIT_DPP_INTS BIT(15)
+#define BIT_CMDLIST13 BIT(14)
+#define BIT_CMDLIST12 BIT(13)
+#define BIT_CMDLIST11 BIT(12)
+#define BIT_CMDLIST10 BIT(11)
+#define BIT_CMDLIST9 BIT(10)
+#define BIT_CMDLIST8 BIT(9)
+#define BIT_CMDLIST7 BIT(8)
+#define BIT_CMDLIST6 BIT(7)
+#define BIT_CMDLIST5 BIT(6)
+#define BIT_CMDLIST4 BIT(5)
+#define BIT_CMDLIST3 BIT(4)
+#define BIT_CMDLIST2 BIT(3)
+#define BIT_CMDLIST1 BIT(2)
+#define BIT_CMDLIST0 BIT(1)
+
+#define BIT_SDP_DSS_GLB_INTS BIT(29)
+#define BIT_SDP_MMU_IRPT_S BIT(28)
+#define BIT_SDP_MMU_IRPT_NS BIT(27)
+#define BIT_SDP_DBG_MCTL_INTS BIT(26)
+#define BIT_SDP_DBG_WCH1_INTS BIT(25)
+#define BIT_SDP_DBG_WCH0_INTS BIT(24)
+#define BIT_SDP_DBG_RCH7_INTS BIT(23)
+#define BIT_SDP_DBG_RCH6_INTS BIT(22)
+#define BIT_SDP_DBG_RCH5_INTS BIT(21)
+#define BIT_SDP_DBG_RCH4_INTS BIT(20)
+#define BIT_SDP_DBG_RCH3_INTS BIT(19)
+#define BIT_SDP_DBG_RCH2_INTS BIT(18)
+#define BIT_SDP_DBG_RCH1_INTS BIT(17)
+#define BIT_SDP_DBG_RCH0_INTS BIT(16)
+#define BIT_SDP_ITF1_INTS BIT(15)
+#define BIT_SDP_CMDLIST13 BIT(14)
+#define BIT_SDP_CMDLIST12 BIT(13)
+#define BIT_SDP_CMDLIST11 BIT(12)
+#define BIT_SDP_CMDLIST10 BIT(11)
+#define BIT_SDP_CMDLIST9 BIT(10)
+#define BIT_SDP_CMDLIST8 BIT(9)
+#define BIT_SDP_CMDLIST7 BIT(8)
+#define BIT_SDP_CMDLIST6 BIT(7)
+#define BIT_SDP_CMDLIST5 BIT(6)
+#define BIT_SDP_CMDLIST4 BIT(5)
+#define BIT_SDP_CMDLIST3 BIT(4)
+#define BIT_SDP_CMDLIST2 BIT(3)
+#define BIT_SDP_CMDLIST1 BIT(2)
+#define BIT_SDP_CMDLIST0 BIT(1)
+#define BIT_SDP_RCH_CE_INTS BIT(0)
+
+#define BIT_OFF_DSS_GLB_INTS BIT(31)
+#define BIT_OFF_MMU_IRPT_S BIT(30)
+#define BIT_OFF_MMU_IRPT_NS BIT(29)
+#define BIT_OFF_DBG_MCTL_INTS BIT(28)
+#define BIT_OFF_DBG_WCH1_INTS BIT(27)
+#define BIT_OFF_DBG_WCH0_INTS BIT(26)
+#define BIT_OFF_DBG_RCH7_INTS BIT(25)
+#define BIT_OFF_DBG_RCH6_INTS BIT(24)
+#define BIT_OFF_DBG_RCH5_INTS BIT(23)
+#define BIT_OFF_DBG_RCH4_INTS BIT(22)
+#define BIT_OFF_DBG_RCH3_INTS BIT(21)
+#define BIT_OFF_DBG_RCH2_INTS BIT(20)
+#define BIT_OFF_DBG_RCH1_INTS BIT(19)
+#define BIT_OFF_DBG_RCH0_INTS BIT(18)
+#define BIT_OFF_WCH1_INTS BIT(17)
+#define BIT_OFF_WCH0_INTS BIT(16)
+#define BIT_OFF_WCH0_WCH1_FRM_END_INT BIT(15)
+#define BIT_OFF_CMDLIST13 BIT(14)
+#define BIT_OFF_CMDLIST12 BIT(13)
+#define BIT_OFF_CMDLIST11 BIT(12)
+#define BIT_OFF_CMDLIST10 BIT(11)
+#define BIT_OFF_CMDLIST9 BIT(10)
+#define BIT_OFF_CMDLIST8 BIT(9)
+#define BIT_OFF_CMDLIST7 BIT(8)
+#define BIT_OFF_CMDLIST6 BIT(7)
+#define BIT_OFF_CMDLIST5 BIT(6)
+#define BIT_OFF_CMDLIST4 BIT(5)
+#define BIT_OFF_CMDLIST3 BIT(4)
+#define BIT_OFF_CMDLIST2 BIT(3)
+#define BIT_OFF_CMDLIST1 BIT(2)
+#define BIT_OFF_CMDLIST0 BIT(1)
+#define BIT_OFF_RCH_CE_INTS BIT(0)
+
+#define BIT_OFF_CAM_DBG_WCH2_INTS BIT(4)
+#define BIT_OFF_CAM_DBG_RCH8_INTS BIT(3)
+#define BIT_OFF_CAM_WCH2_FRMEND_INTS BIT(2)
+#define BIT_OFF_CAM_CMDLIST15_INTS BIT(1)
+#define BIT_OFF_CAM_CMDLIST14_INTS BIT(0)
+
+#define BIT_VACTIVE_CNT BIT(14)
+#define BIT_DSI_TE_TRI BIT(13)
+#define BIT_LCD_TE0_PIN BIT(12)
+#define BIT_LCD_TE1_PIN BIT(11)
+#define BIT_VACTIVE1_END BIT(10)
+#define BIT_VACTIVE1_START BIT(9)
+#define BIT_VACTIVE0_END BIT(8)
+#define BIT_VACTIVE0_START BIT(7)
+#define BIT_VFRONTPORCH BIT(6)
+#define BIT_VBACKPORCH BIT(5)
+#define BIT_VSYNC BIT(4)
+#define BIT_VFRONTPORCH_END BIT(3)
+#define BIT_LDI_UNFLOW BIT(2)
+#define BIT_FRM_END BIT(1)
+#define BIT_FRM_START BIT(0)
+
+#define BIT_CTL_FLUSH_EN BIT(21)
+#define BIT_SCF_FLUSH_EN BIT(19)
+#define BIT_DPP0_FLUSH_EN BIT(18)
+#define BIT_DBUF1_FLUSH_EN BIT(17)
+#define BIT_DBUF0_FLUSH_EN BIT(16)
+#define BIT_OV3_FLUSH_EN BIT(15)
+#define BIT_OV2_FLUSH_EN BIT(14)
+#define BIT_OV1_FLUSH_EN BIT(13)
+#define BIT_OV0_FLUSH_EN BIT(12)
+#define BIT_WB1_FLUSH_EN BIT(11)
+#define BIT_WB0_FLUSH_EN BIT(10)
+#define BIT_DMA3_FLUSH_EN BIT(9)
+#define BIT_DMA2_FLUSH_EN BIT(8)
+#define BIT_DMA1_FLUSH_EN BIT(7)
+#define BIT_DMA0_FLUSH_EN BIT(6)
+#define BIT_RGB1_FLUSH_EN BIT(4)
+#define BIT_RGB0_FLUSH_EN BIT(3)
+#define BIT_VIG1_FLUSH_EN BIT(1)
+#define BIT_VIG0_FLUSH_EN BIT(0)
+
+#define BIT_BUS_DBG_INT BIT(5)
+#define BIT_CRC_SUM_INT BIT(4)
+#define BIT_CRC_ITF1_INT BIT(3)
+#define BIT_CRC_ITF0_INT BIT(2)
+#define BIT_CRC_OV1_INT BIT(1)
+#define BIT_CRC_OV0_INT BIT(0)
+
+#define BIT_SBL_SEND_FRAME_OUT BIT(19)
+#define BIT_SBL_STOP_FRAME_OUT BIT(18)
+#define BIT_SBL_BACKLIGHT_OUT BIT(17)
+#define BIT_SBL_DARKENH_OUT BIT(16)
+#define BIT_SBL_BRIGHTPTR_OUT BIT(15)
+#define BIT_STRENGTH_INROI_OUT BIT(14)
+#define BIT_STRENGTH_OUTROI_OUT BIT(13)
+#define BIT_DONE_OUT BIT(12)
+#define BIT_PPROC_DONE_OUT BIT(11)
+
+#define BIT_HIACE_IND BIT(8)
+#define BIT_STRENGTH_INTP BIT(7)
+#define BIT_BACKLIGHT_INTP BIT(6)
+#define BIT_CE_END_IND BIT(5)
+#define BIT_CE_CANCEL_IND BIT(4)
+#define BIT_CE_LUT1_RW_COLLIDE_IND BIT(3)
+#define BIT_CE_LUT0_RW_COLLIDE_IND BIT(2)
+#define BIT_CE_HIST1_RW_COLLIDE_IND BIT(1)
+#define BIT_CE_HIST0_RW_COLLIDE_IND BIT(0)
+
+/*******************************************************************************
+ ** MODULE BASE ADDRESS
+ */
+
+#define DSS_MIPI_DSI0_OFFSET (0x00001000)
+#define DSS_MIPI_DSI1_OFFSET (0x00001400)
+
+#define DSS_GLB0_OFFSET (0x12000)
+
+#define DSS_DBG_OFFSET (0x11000)
+
+#define DSS_CMDLIST_OFFSET (0x2000)
+
+#define DSS_SMMU_OFFSET (0x8000)
+
+#define DSS_VBIF0_AIF (0x7000)
+#define DSS_VBIF1_AIF (0x9000)
+
+#define DSS_MIF_OFFSET (0xA000)
+
+#define DSS_MCTRL_SYS_OFFSET (0x10000)
+
+#define DSS_MCTRL_CTL0_OFFSET (0x10800)
+#define DSS_MCTRL_CTL1_OFFSET (0x10900)
+#define DSS_MCTRL_CTL2_OFFSET (0x10A00)
+#define DSS_MCTRL_CTL3_OFFSET (0x10B00)
+#define DSS_MCTRL_CTL4_OFFSET (0x10C00)
+#define DSS_MCTRL_CTL5_OFFSET (0x10D00)
+
+#define DSS_RCH_VG0_DMA_OFFSET (0x20000)
+#define DSS_RCH_VG0_DFC_OFFSET (0x20100)
+#define DSS_RCH_VG0_SCL_OFFSET (0x20200)
+#define DSS_RCH_VG0_ARSR_OFFSET (0x20300)
+#define DSS_RCH_VG0_POST_CLIP_OFFSET (0x203A0)
+#define DSS_RCH_VG0_PCSC_OFFSET (0x20400)
+#define DSS_RCH_VG0_CSC_OFFSET (0x20500)
+#define DSS_RCH_VG0_DEBUG_OFFSET (0x20600)
+#define DSS_RCH_VG0_VPP_OFFSET (0x20700)
+#define DSS_RCH_VG0_DMA_BUF_OFFSET (0x20800)
+#define DSS_RCH_VG0_AFBCD_OFFSET (0x20900)
+#define DSS_RCH_VG0_REG_DEFAULT_OFFSET (0x20A00)
+#define DSS_RCH_VG0_SCL_LUT_OFFSET (0x21000)
+#define DSS_RCH_VG0_ARSR_LUT_OFFSET (0x25000)
+
+#define DSS_RCH_VG1_DMA_OFFSET (0x28000)
+#define DSS_RCH_VG1_DFC_OFFSET (0x28100)
+#define DSS_RCH_VG1_SCL_OFFSET (0x28200)
+#define DSS_RCH_VG1_POST_CLIP_OFFSET (0x283A0)
+#define DSS_RCH_VG1_CSC_OFFSET (0x28500)
+#define DSS_RCH_VG1_DEBUG_OFFSET (0x28600)
+#define DSS_RCH_VG1_VPP_OFFSET (0x28700)
+#define DSS_RCH_VG1_DMA_BUF_OFFSET (0x28800)
+#define DSS_RCH_VG1_AFBCD_OFFSET (0x28900)
+#define DSS_RCH_VG1_REG_DEFAULT_OFFSET (0x28A00)
+#define DSS_RCH_VG1_SCL_LUT_OFFSET (0x29000)
+
+#define DSS_RCH_VG2_DMA_OFFSET (0x30000)
+#define DSS_RCH_VG2_DFC_OFFSET (0x30100)
+#define DSS_RCH_VG2_SCL_OFFSET (0x30200)
+#define DSS_RCH_VG2_POST_CLIP_OFFSET (0x303A0)
+#define DSS_RCH_VG2_CSC_OFFSET (0x30500)
+#define DSS_RCH_VG2_DEBUG_OFFSET (0x30600)
+#define DSS_RCH_VG2_VPP_OFFSET (0x30700)
+#define DSS_RCH_VG2_DMA_BUF_OFFSET (0x30800)
+#define DSS_RCH_VG2_AFBCD_OFFSET (0x30900)
+#define DSS_RCH_VG2_REG_DEFAULT_OFFSET (0x30A00)
+#define DSS_RCH_VG2_SCL_LUT_OFFSET (0x31000)
+
+#define DSS_RCH_G0_DMA_OFFSET (0x38000)
+#define DSS_RCH_G0_DFC_OFFSET (0x38100)
+#define DSS_RCH_G0_SCL_OFFSET (0x38200)
+#define DSS_RCH_G0_POST_CLIP_OFFSET (0x383A0)
+#define DSS_RCH_G0_CSC_OFFSET (0x38500)
+#define DSS_RCH_G0_DEBUG_OFFSET (0x38600)
+#define DSS_RCH_G0_DMA_BUF_OFFSET (0x38800)
+#define DSS_RCH_G0_AFBCD_OFFSET (0x38900)
+#define DSS_RCH_G0_REG_DEFAULT_OFFSET (0x38A00)
+
+#define DSS_RCH_G1_DMA_OFFSET (0x40000)
+#define DSS_RCH_G1_DFC_OFFSET (0x40100)
+#define DSS_RCH_G1_SCL_OFFSET (0x40200)
+#define DSS_RCH_G1_POST_CLIP_OFFSET (0x403A0)
+#define DSS_RCH_G1_CSC_OFFSET (0x40500)
+#define DSS_RCH_G1_DEBUG_OFFSET (0x40600)
+#define DSS_RCH_G1_DMA_BUF_OFFSET (0x40800)
+#define DSS_RCH_G1_AFBCD_OFFSET (0x40900)
+#define DSS_RCH_G1_REG_DEFAULT_OFFSET (0x40A00)
+
+#define DSS_RCH_D2_DMA_OFFSET (0x50000)
+#define DSS_RCH_D2_DFC_OFFSET (0x50100)
+#define DSS_RCH_D2_CSC_OFFSET (0x50500)
+#define DSS_RCH_D2_DEBUG_OFFSET (0x50600)
+#define DSS_RCH_D2_DMA_BUF_OFFSET (0x50800)
+#define DSS_RCH_D2_AFBCD_OFFSET (0x50900)
+
+#define DSS_RCH_D3_DMA_OFFSET (0x51000)
+#define DSS_RCH_D3_DFC_OFFSET (0x51100)
+#define DSS_RCH_D3_CSC_OFFSET (0x51500)
+#define DSS_RCH_D3_DEBUG_OFFSET (0x51600)
+#define DSS_RCH_D3_DMA_BUF_OFFSET (0x51800)
+#define DSS_RCH_D3_AFBCD_OFFSET (0x51900)
+
+#define DSS_RCH_D0_DMA_OFFSET (0x52000)
+#define DSS_RCH_D0_DFC_OFFSET (0x52100)
+#define DSS_RCH_D0_CSC_OFFSET (0x52500)
+#define DSS_RCH_D0_DEBUG_OFFSET (0x52600)
+#define DSS_RCH_D0_DMA_BUF_OFFSET (0x52800)
+#define DSS_RCH_D0_AFBCD_OFFSET (0x52900)
+
+#define DSS_RCH_D1_DMA_OFFSET (0x53000)
+#define DSS_RCH_D1_DFC_OFFSET (0x53100)
+#define DSS_RCH_D1_CSC_OFFSET (0x53500)
+#define DSS_RCH_D1_DEBUG_OFFSET (0x53600)
+#define DSS_RCH_D1_DMA_BUF_OFFSET (0x53800)
+#define DSS_RCH_D1_AFBCD_OFFSET (0x53900)
+
+#define DSS_WCH0_DMA_OFFSET (0x5A000)
+#define DSS_WCH0_DFC_OFFSET (0x5A100)
+#define DSS_WCH0_CSC_OFFSET (0x5A500)
+#define DSS_WCH0_ROT_OFFSET (0x5A500)
+#define DSS_WCH0_DEBUG_OFFSET (0x5A600)
+#define DSS_WCH0_DMA_BUFFER_OFFSET (0x5A800)
+#define DSS_WCH0_AFBCE_OFFSET (0x5A900)
+
+#define DSS_WCH1_DMA_OFFSET (0x5C000)
+#define DSS_WCH1_DFC_OFFSET (0x5C100)
+#define DSS_WCH1_CSC_OFFSET (0x5C500)
+#define DSS_WCH1_ROT_OFFSET (0x5C500)
+#define DSS_WCH1_DEBUG_OFFSET (0x5C600)
+#define DSS_WCH1_DMA_BUFFER_OFFSET (0x5C800)
+#define DSS_WCH1_AFBCE_OFFSET (0x5C900)
+
+#define DSS_WCH2_DMA_OFFSET (0x5E000)
+#define DSS_WCH2_DFC_OFFSET (0x5E100)
+#define DSS_WCH2_CSC_OFFSET (0x5E500)
+#define DSS_WCH2_ROT_OFFSET (0x5E500)
+#define DSS_WCH2_DEBUG_OFFSET (0x5E600)
+#define DSS_WCH2_DMA_BUFFER_OFFSET (0x5E800)
+#define DSS_WCH2_AFBCE_OFFSET (0x5E900)
+
+#define DSS_OVL0_OFFSET (0x60000)
+#define DSS_OVL1_OFFSET (0x60400)
+#define DSS_OVL2_OFFSET (0x60800)
+#define DSS_OVL3_OFFSET (0x60C00)
+
+#define DSS_DBUF0_OFFSET (0x6D000)
+#define DSS_DBUF1_OFFSET (0x6E000)
+
+#define DSS_HI_ACE_OFFSET (0x6F000)
+
+#define DSS_DPP_OFFSET (0x70000)
+#define DSS_TOP_OFFSET (0x70000)
+#define DSS_DPP_COLORBAR_OFFSET (0x70100)
+#define DSS_DPP_DITHER_OFFSET (0x70200)
+#define DSS_DPP_CSC_RGB2YUV10B_OFFSET (0x70300)
+#define DSS_DPP_CSC_YUV2RGB10B_OFFSET (0x70400)
+#define DSS_DPP_DEGAMA_OFFSET (0x70500)
+#define DSS_DPP_GAMA_OFFSET (0x70600)
+#define DSS_DPP_ACM_OFFSET (0x70700)
+#define DSS_DPP_ACE_OFFSET (0x70800)
+#define DSS_DPP_LCP_OFFSET (0x70900)
+#define DSS_DPP_ARSR1P_OFFSET (0x70A00)
+#define DSS_DPP_BITEXT0_OFFSET (0x70B00)
+#define DSS_DPP_GAMA_LUT_OFFSET (0x71000)
+#define DSS_DPP_ACM_LUT_OFFSET (0x72000)
+#define DSS_DPP_LCP_LUT_OFFSET (0x73000)
+#define DSS_DPP_ACE_LUT_OFFSET (0x79000)
+#define DSS_DPP_ARSR1P_LUT_OFFSET (0x7B000)
+
+#define DSS_POST_SCF_OFFSET DSS_DPP_ARSR1P_OFFSET
+#define DSS_POST_SCF_LUT_OFFSET DSS_DPP_ARSR1P_LUT_OFFSET
+
+#define DSS_DPP_SBL_OFFSET (0x7C000)
+#define DSS_LDI0_OFFSET (0x7D000)
+#define DSS_IFBC_OFFSET (0x7D800)
+#define DSS_DSC_OFFSET (0x7DC00)
+#define DSS_LDI1_OFFSET (0x7E000)
+
+/*******************************************************************************
+ ** GLB
+ */
+#define GLB_DSS_TAG (DSS_GLB0_OFFSET + 0x0000)
+
+#define GLB_APB_CTL (DSS_GLB0_OFFSET + 0x0004)
+
+#define GLB_DSS_AXI_RST_EN (DSS_GLB0_OFFSET + 0x0118)
+#define GLB_DSS_APB_RST_EN (DSS_GLB0_OFFSET + 0x011C)
+#define GLB_DSS_CORE_RST_EN (DSS_GLB0_OFFSET + 0x0120)
+#define GLB_PXL0_DIV2_RST_EN (DSS_GLB0_OFFSET + 0x0124)
+#define GLB_PXL0_DIV4_RST_EN (DSS_GLB0_OFFSET + 0x0128)
+#define GLB_PXL0_RST_EN (DSS_GLB0_OFFSET + 0x012C)
+#define GLB_PXL0_DSI_RST_EN (DSS_GLB0_OFFSET + 0x0130)
+#define GLB_DSS_PXL1_RST_EN (DSS_GLB0_OFFSET + 0x0134)
+#define GLB_MM_AXI_CLK_RST_EN (DSS_GLB0_OFFSET + 0x0138)
+#define GLB_AFBCD0_IP_RST_EN (DSS_GLB0_OFFSET + 0x0140)
+#define GLB_AFBCD1_IP_RST_EN (DSS_GLB0_OFFSET + 0x0144)
+#define GLB_AFBCD2_IP_RST_EN (DSS_GLB0_OFFSET + 0x0148)
+#define GLB_AFBCD3_IP_RST_EN (DSS_GLB0_OFFSET + 0x014C)
+#define GLB_AFBCD4_IP_RST_EN (DSS_GLB0_OFFSET + 0x0150)
+#define GLB_AFBCD5_IP_RST_EN (DSS_GLB0_OFFSET + 0x0154)
+#define GLB_AFBCD6_IP_RST_EN (DSS_GLB0_OFFSET + 0x0158)
+#define GLB_AFBCD7_IP_RST_EN (DSS_GLB0_OFFSET + 0x015C)
+#define GLB_AFBCE0_IP_RST_EN (DSS_GLB0_OFFSET + 0x0160)
+#define GLB_AFBCE1_IP_RST_EN (DSS_GLB0_OFFSET + 0x0164)
+
+#define GLB_MCU_PDP_INTS (DSS_GLB0_OFFSET + 0x20C)
+#define GLB_MCU_PDP_INT_MSK (DSS_GLB0_OFFSET + 0x210)
+#define GLB_MCU_SDP_INTS (DSS_GLB0_OFFSET + 0x214)
+#define GLB_MCU_SDP_INT_MSK (DSS_GLB0_OFFSET + 0x218)
+#define GLB_MCU_OFF_INTS (DSS_GLB0_OFFSET + 0x21C)
+#define GLB_MCU_OFF_INT_MSK (DSS_GLB0_OFFSET + 0x220)
+#define GLB_MCU_OFF_CAM_INTS (DSS_GLB0_OFFSET + 0x2B4)
+#define GLB_MCU_OFF_CAM_INT_MSK (DSS_GLB0_OFFSET + 0x2B8)
+#define GLB_CPU_PDP_INTS (DSS_GLB0_OFFSET + 0x224)
+#define GLB_CPU_PDP_INT_MSK (DSS_GLB0_OFFSET + 0x228)
+#define GLB_CPU_SDP_INTS (DSS_GLB0_OFFSET + 0x22C)
+#define GLB_CPU_SDP_INT_MSK (DSS_GLB0_OFFSET + 0x230)
+#define GLB_CPU_OFF_INTS (DSS_GLB0_OFFSET + 0x234)
+#define GLB_CPU_OFF_INT_MSK (DSS_GLB0_OFFSET + 0x238)
+#define GLB_CPU_OFF_CAM_INTS (DSS_GLB0_OFFSET + 0x2AC)
+#define GLB_CPU_OFF_CAM_INT_MSK (DSS_GLB0_OFFSET + 0x2B0)
+
+#define GLB_MODULE_CLK_SEL (DSS_GLB0_OFFSET + 0x0300)
+#define GLB_MODULE_CLK_EN (DSS_GLB0_OFFSET + 0x0304)
+
+#define GLB_GLB0_DBG_SEL (DSS_GLB0_OFFSET + 0x310)
+#define GLB_GLB1_DBG_SEL (DSS_GLB0_OFFSET + 0x314)
+#define GLB_DBG_IRQ_CPU (DSS_GLB0_OFFSET + 0x320)
+#define GLB_DBG_IRQ_MCU (DSS_GLB0_OFFSET + 0x324)
+
+#define GLB_TP_SEL (DSS_GLB0_OFFSET + 0x0400)
+#define GLB_CRC_DBG_LDI0 (DSS_GLB0_OFFSET + 0x0404)
+#define GLB_CRC_DBG_LDI1 (DSS_GLB0_OFFSET + 0x0408)
+#define GLB_CRC_LDI0_EN (DSS_GLB0_OFFSET + 0x040C)
+#define GLB_CRC_LDI0_FRM (DSS_GLB0_OFFSET + 0x0410)
+#define GLB_CRC_LDI1_EN (DSS_GLB0_OFFSET + 0x0414)
+#define GLB_CRC_LDI1_FRM (DSS_GLB0_OFFSET + 0x0418)
+
+#define GLB_DSS_MEM_CTRL (DSS_GLB0_OFFSET + 0x0600)
+#define GLB_DSS_PM_CTRL (DSS_GLB0_OFFSET + 0x0604)
+
+/*******************************************************************************
+ ** DBG
+ */
+#define DBG_CRC_DBG_OV0 (0x0000)
+#define DBG_CRC_DBG_OV1 (0x0004)
+#define DBG_CRC_DBG_SUM (0x0008)
+#define DBG_CRC_OV0_EN (0x000C)
+#define DBG_DSS_GLB_DBG_O (0x0010)
+#define DBG_DSS_GLB_DBG_I (0x0014)
+#define DBG_CRC_OV0_FRM (0x0018)
+#define DBG_CRC_OV1_EN (0x001C)
+#define DBG_CRC_OV1_FRM (0x0020)
+#define DBG_CRC_SUM_EN (0x0024)
+#define DBG_CRC_SUM_FRM (0x0028)
+
+#define DBG_MCTL_INTS (0x023C)
+#define DBG_MCTL_INT_MSK (0x0240)
+#define DBG_WCH0_INTS (0x0244)
+#define DBG_WCH0_INT_MSK (0x0248)
+#define DBG_WCH1_INTS (0x024C)
+#define DBG_WCH1_INT_MSK (0x0250)
+#define DBG_RCH0_INTS (0x0254)
+#define DBG_RCH0_INT_MSK (0x0258)
+#define DBG_RCH1_INTS (0x025C)
+#define DBG_RCH1_INT_MSK (0x0260)
+#define DBG_RCH2_INTS (0x0264)
+#define DBG_RCH2_INT_MSK (0x0268)
+#define DBG_RCH3_INTS (0x026C)
+#define DBG_RCH3_INT_MSK (0x0270)
+#define DBG_RCH4_INTS (0x0274)
+#define DBG_RCH4_INT_MSK (0x0278)
+#define DBG_RCH5_INTS (0x027C)
+#define DBG_RCH5_INT_MSK (0x0280)
+#define DBG_RCH6_INTS (0x0284)
+#define DBG_RCH6_INT_MSK (0x0288)
+#define DBG_RCH7_INTS (0x028C)
+#define DBG_RCH7_INT_MSK (0x0290)
+#define DBG_DSS_GLB_INTS (0x0294)
+#define DBG_DSS_GLB_INT_MSK (0x0298)
+#define DBG_WCH2_INTS (0x029C)
+#define DBG_WCH2_INT_MSK (0x02A0)
+#define DBG_RCH8_INTS (0x02A4)
+#define DBG_RCH8_INT_MSK (0x02A8)
+
+/*******************************************************************************
+ ** CMDLIST
+ */
+
+#define CMDLIST_CH0_PENDING_CLR (0x0000)
+#define CMDLIST_CH0_CTRL (0x0004)
+#define CMDLIST_CH0_STATUS (0x0008)
+#define CMDLIST_CH0_STAAD (0x000C)
+#define CMDLIST_CH0_CURAD (0x0010)
+#define CMDLIST_CH0_INTE (0x0014)
+#define CMDLIST_CH0_INTC (0x0018)
+#define CMDLIST_CH0_INTS (0x001C)
+#define CMDLIST_CH0_SCENE (0x0020)
+#define CMDLIST_CH0_DBG (0x0028)
+
+#define CMDLIST_DBG (0x0700)
+#define CMDLIST_BUF_DBG_EN (0x0704)
+#define CMDLIST_BUF_DBG_CNT_CLR (0x0708)
+#define CMDLIST_BUF_DBG_CNT (0x070C)
+#define CMDLIST_TIMEOUT_TH (0x0710)
+#define CMDLIST_START (0x0714)
+#define CMDLIST_ADDR_MASK_EN (0x0718)
+#define CMDLIST_ADDR_MASK_DIS (0x071C)
+#define CMDLIST_ADDR_MASK_STATUS (0x0720)
+#define CMDLIST_TASK_CONTINUE (0x0724)
+#define CMDLIST_TASK_STATUS (0x0728)
+#define CMDLIST_CTRL (0x072C)
+#define CMDLIST_SECU (0x0730)
+#define CMDLIST_INTS (0x0734)
+#define CMDLIST_SWRST (0x0738)
+#define CMD_MEM_CTRL (0x073C)
+#define CMD_CLK_SEL (0x0740)
+#define CMD_CLK_EN (0x0744)
+
+#define HISI_DSS_MIN_ROT_AFBCE_BLOCK_SIZE (256)
+#define HISI_DSS_MAX_ROT_AFBCE_BLOCK_SIZE (480)
+
+#define BIT_CMDLIST_CH_TASKDONE_INTS BIT(7)
+#define BIT_CMDLIST_CH_TIMEOUT_INTS BIT(6)
+#define BIT_CMDLIST_CH_BADCMD_INTS BIT(5)
+#define BIT_CMDLIST_CH_START_INTS BIT(4)
+#define BIT_CMDLIST_CH_PENDING_INTS BIT(3)
+#define BIT_CMDLIST_CH_AXIERR_INTS BIT(2)
+#define BIT_CMDLIST_CH_ALLDONE_INTS BIT(1)
+#define BIT_CMDLIST_CH_ONEDONE_INTS BIT(0)
+
+#define BIT_CMDLIST_CH15_INTS BIT(15)
+#define BIT_CMDLIST_CH14_INTS BIT(14)
+#define BIT_CMDLIST_CH13_INTS BIT(13)
+#define BIT_CMDLIST_CH12_INTS BIT(12)
+#define BIT_CMDLIST_CH11_INTS BIT(11)
+#define BIT_CMDLIST_CH10_INTS BIT(10)
+#define BIT_CMDLIST_CH9_INTS BIT(9)
+#define BIT_CMDLIST_CH8_INTS BIT(8)
+#define BIT_CMDLIST_CH7_INTS BIT(7)
+#define BIT_CMDLIST_CH6_INTS BIT(6)
+#define BIT_CMDLIST_CH5_INTS BIT(5)
+#define BIT_CMDLIST_CH4_INTS BIT(4)
+#define BIT_CMDLIST_CH3_INTS BIT(3)
+#define BIT_CMDLIST_CH2_INTS BIT(2)
+#define BIT_CMDLIST_CH1_INTS BIT(1)
+#define BIT_CMDLIST_CH0_INTS BIT(0)
+
+/*******************************************************************************
+ ** AIF
+ */
+#define AIF0_CH0_OFFSET (DSS_VBIF0_AIF + 0x00)
+#define AIF0_CH0_ADD_OFFSET (DSS_VBIF0_AIF + 0x04)
+#define AIF0_CH1_OFFSET (DSS_VBIF0_AIF + 0x20)
+#define AIF0_CH1_ADD_OFFSET (DSS_VBIF0_AIF + 0x24)
+#define AIF0_CH2_OFFSET (DSS_VBIF0_AIF + 0x40)
+#define AIF0_CH2_ADD_OFFSET (DSS_VBIF0_AIF + 0x44)
+#define AIF0_CH3_OFFSET (DSS_VBIF0_AIF + 0x60)
+#define AIF0_CH3_ADD_OFFSET (DSS_VBIF0_AIF + 0x64)
+#define AIF0_CH4_OFFSET (DSS_VBIF0_AIF + 0x80)
+#define AIF0_CH4_ADD_OFFSET (DSS_VBIF0_AIF + 0x84)
+#define AIF0_CH5_OFFSET (DSS_VBIF0_AIF + 0xA0)
+#define AIF0_CH5_ADD_OFFSET (DSS_VBIF0_AIF + 0xa4)
+#define AIF0_CH6_OFFSET (DSS_VBIF0_AIF + 0xC0)
+#define AIF0_CH6_ADD_OFFSET (DSS_VBIF0_AIF + 0xc4)
+#define AIF0_CH7_OFFSET (DSS_VBIF0_AIF + 0xE0)
+#define AIF0_CH7_ADD_OFFSET (DSS_VBIF0_AIF + 0xe4)
+#define AIF0_CH8_OFFSET (DSS_VBIF0_AIF + 0x100)
+#define AIF0_CH8_ADD_OFFSET (DSS_VBIF0_AIF + 0x104)
+#define AIF0_CH9_OFFSET (DSS_VBIF0_AIF + 0x120)
+#define AIF0_CH9_ADD_OFFSET (DSS_VBIF0_AIF + 0x124)
+#define AIF0_CH10_OFFSET (DSS_VBIF0_AIF + 0x140)
+#define AIF0_CH10_ADD_OFFSET (DSS_VBIF0_AIF + 0x144)
+#define AIF0_CH11_OFFSET (DSS_VBIF0_AIF + 0x160)
+#define AIF0_CH11_ADD_OFFSET (DSS_VBIF0_AIF + 0x164)
+#define AIF0_CH12_OFFSET (DSS_VBIF0_AIF + 0x180)
+#define AIF0_CH12_ADD_OFFSET (DSS_VBIF0_AIF + 0x184)
+
+#define AIF1_CH0_OFFSET (DSS_VBIF1_AIF + 0x00)
+#define AIF1_CH0_ADD_OFFSET (DSS_VBIF1_AIF + 0x04)
+#define AIF1_CH1_OFFSET (DSS_VBIF1_AIF + 0x20)
+#define AIF1_CH1_ADD_OFFSET (DSS_VBIF1_AIF + 0x24)
+#define AIF1_CH2_OFFSET (DSS_VBIF1_AIF + 0x40)
+#define AIF1_CH2_ADD_OFFSET (DSS_VBIF1_AIF + 0x44)
+#define AIF1_CH3_OFFSET (DSS_VBIF1_AIF + 0x60)
+#define AIF1_CH3_ADD_OFFSET (DSS_VBIF1_AIF + 0x64)
+#define AIF1_CH4_OFFSET (DSS_VBIF1_AIF + 0x80)
+#define AIF1_CH4_ADD_OFFSET (DSS_VBIF1_AIF + 0x84)
+#define AIF1_CH5_OFFSET (DSS_VBIF1_AIF + 0xA0)
+#define AIF1_CH5_ADD_OFFSET (DSS_VBIF1_AIF + 0xa4)
+#define AIF1_CH6_OFFSET (DSS_VBIF1_AIF + 0xC0)
+#define AIF1_CH6_ADD_OFFSET (DSS_VBIF1_AIF + 0xc4)
+#define AIF1_CH7_OFFSET (DSS_VBIF1_AIF + 0xE0)
+#define AIF1_CH7_ADD_OFFSET (DSS_VBIF1_AIF + 0xe4)
+#define AIF1_CH8_OFFSET (DSS_VBIF1_AIF + 0x100)
+#define AIF1_CH8_ADD_OFFSET (DSS_VBIF1_AIF + 0x104)
+#define AIF1_CH9_OFFSET (DSS_VBIF1_AIF + 0x120)
+#define AIF1_CH9_ADD_OFFSET (DSS_VBIF1_AIF + 0x124)
+#define AIF1_CH10_OFFSET (DSS_VBIF1_AIF + 0x140)
+#define AIF1_CH10_ADD_OFFSET (DSS_VBIF1_AIF + 0x144)
+#define AIF1_CH11_OFFSET (DSS_VBIF1_AIF + 0x160)
+#define AIF1_CH11_ADD_OFFSET (DSS_VBIF1_AIF + 0x164)
+#define AIF1_CH12_OFFSET (DSS_VBIF1_AIF + 0x180)
+#define AIF1_CH12_ADD_OFFSET (DSS_VBIF1_AIF + 0x184)
+
+/* aif dmax */
+
+#define AIF_CH_CTL (0x0000)
+
+#define AIF_CH_CTL_ADD (0x0004)
+
+/* aif common */
+#define AXI0_RID_MSK0 (0x0800)
+#define AXI0_RID_MSK1 (0x0804)
+#define AXI0_WID_MSK (0x0808)
+#define AXI0_R_QOS_MAP (0x080c)
+#define AXI1_RID_MSK0 (0x0810)
+#define AXI1_RID_MSK1 (0x0814)
+#define AXI1_WID_MSK (0x0818)
+#define AXI1_R_QOS_MAP (0x081c)
+#define AIF_CLK_SEL0 (0x0820)
+#define AIF_CLK_SEL1 (0x0824)
+#define AIF_CLK_EN0 (0x0828)
+#define AIF_CLK_EN1 (0x082c)
+#define MONITOR_CTRL (0x0830)
+#define MONITOR_TIMER_INI (0x0834)
+#define DEBUG_BUF_BASE (0x0838)
+#define DEBUG_CTRL (0x083C)
+#define AIF_SHADOW_READ (0x0840)
+#define AIF_MEM_CTRL (0x0844)
+#define AIF_MONITOR_EN (0x0848)
+#define AIF_MONITOR_CTRL (0x084C)
+#define AIF_MONITOR_SAMPLE_MUN (0x0850)
+#define AIF_MONITOR_SAMPLE_TIME (0x0854)
+#define AIF_MONITOR_SAMPLE_FLOW (0x0858)
+
+/* aif debug */
+#define AIF_MONITOR_READ_DATA (0x0880)
+#define AIF_MONITOR_WRITE_DATA (0x0884)
+#define AIF_MONITOR_WINDOW_CYCLE (0x0888)
+#define AIF_MONITOR_WBURST_CNT (0x088C)
+#define AIF_MONITOR_MIN_WR_CYCLE (0x0890)
+#define AIF_MONITOR_MAX_WR_CYCLE (0x0894)
+#define AIF_MONITOR_AVR_WR_CYCLE (0x0898)
+#define AIF_MONITOR_MIN_WRW_CYCLE (0x089C)
+#define AIF_MONITOR_MAX_WRW_CYCLE (0x08A0)
+#define AIF_MONITOR_AVR_WRW_CYCLE (0x08A4)
+#define AIF_MONITOR_RBURST_CNT (0x08A8)
+#define AIF_MONITOR_MIN_RD_CYCLE (0x08AC)
+#define AIF_MONITOR_MAX_RD_CYCLE (0x08B0)
+#define AIF_MONITOR_AVR_RD_CYCLE (0x08B4)
+#define AIF_MONITOR_MIN_RDW_CYCLE (0x08B8)
+#define AIF_MONITOR_MAX_RDW_CYCLE (0x08BC)
+#define AIF_MONITOR_AVR_RDW_CYCLE (0x08C0)
+#define AIF_CH_STAT_0 (0x08C4)
+#define AIF_CH_STAT_1 (0x08C8)
+
+#define AIF_MODULE_CLK_SEL (0x0A04)
+#define AIF_MODULE_CLK_EN (0x0A08)
+
+typedef struct dss_aif {
+ u32 aif_ch_ctl;
+ u32 aif_ch_ctl_add;
+} dss_aif_t;
+
+typedef struct dss_aif_bw {
+ u64 bw;
+ u8 chn_idx;
+ s8 axi_sel;
+ u8 is_used;
+} dss_aif_bw_t;
+
+/*******************************************************************************
+ ** MIF
+ */
+#define MIF_ENABLE (0x0000)
+#define MIF_MEM_CTRL (0x0004)
+
+#define MIF_CTRL0 (0x000)
+#define MIF_CTRL1 (0x004)
+#define MIF_CTRL2 (0x008)
+#define MIF_CTRL3 (0x00C)
+#define MIF_CTRL4 (0x010)
+#define MIF_CTRL5 (0x014)
+#define REG_DEFAULT (0x0500)
+#define MIF_SHADOW_READ (0x0504)
+#define MIF_CLK_CTL (0x0508)
+
+#define MIF_STAT0 (0x0600)
+
+#define MIF_STAT1 (0x0604)
+
+#define MIF_STAT2 (0x0608)
+
+#define MIF_CTRL_OFFSET (0x20)
+#define MIF_CH0_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 1)
+#define MIF_CH1_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 2)
+#define MIF_CH2_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 3)
+#define MIF_CH3_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 4)
+#define MIF_CH4_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 5)
+#define MIF_CH5_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 6)
+#define MIF_CH6_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 7)
+#define MIF_CH7_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 8)
+#define MIF_CH8_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 9)
+#define MIF_CH9_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 10)
+#define MIF_CH10_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 11)
+#define MIF_CH11_OFFSET (DSS_MIF_OFFSET + MIF_CTRL_OFFSET * 12)
+#define MIF_CTRL_NUM (12)
+
+#define LITTLE_LAYER_BUF_SIZE (256 * 1024)
+#define MIF_STRIDE_UNIT (4 * 1024)
+
+typedef struct dss_mif {
+ u32 mif_ctrl1;
+ u32 mif_ctrl2;
+ u32 mif_ctrl3;
+ u32 mif_ctrl4;
+ u32 mif_ctrl5;
+} dss_mif_t;
+
+/*
+ ** stretch blt, linear/tile, rotation, pixel format
+ ** 0 0 000
+ */
+enum dss_mmu_tlb_tag_org {
+ MMU_TLB_TAG_ORG_0x0 = 0x0,
+ MMU_TLB_TAG_ORG_0x1 = 0x1,
+ MMU_TLB_TAG_ORG_0x2 = 0x2,
+ MMU_TLB_TAG_ORG_0x3 = 0x3,
+ MMU_TLB_TAG_ORG_0x4 = 0x4,
+ MMU_TLB_TAG_ORG_0x7 = 0x7,
+
+ MMU_TLB_TAG_ORG_0x8 = 0x8,
+ MMU_TLB_TAG_ORG_0x9 = 0x9,
+ MMU_TLB_TAG_ORG_0xA = 0xA,
+ MMU_TLB_TAG_ORG_0xB = 0xB,
+ MMU_TLB_TAG_ORG_0xC = 0xC,
+ MMU_TLB_TAG_ORG_0xF = 0xF,
+
+ MMU_TLB_TAG_ORG_0x10 = 0x10,
+ MMU_TLB_TAG_ORG_0x11 = 0x11,
+ MMU_TLB_TAG_ORG_0x12 = 0x12,
+ MMU_TLB_TAG_ORG_0x13 = 0x13,
+ MMU_TLB_TAG_ORG_0x14 = 0x14,
+ MMU_TLB_TAG_ORG_0x17 = 0x17,
+
+ MMU_TLB_TAG_ORG_0x18 = 0x18,
+ MMU_TLB_TAG_ORG_0x19 = 0x19,
+ MMU_TLB_TAG_ORG_0x1A = 0x1A,
+ MMU_TLB_TAG_ORG_0x1B = 0x1B,
+ MMU_TLB_TAG_ORG_0x1C = 0x1C,
+ MMU_TLB_TAG_ORG_0x1F = 0x1F,
+};
+
+/*******************************************************************************
+ ** SMMU
+ */
+#define SMMU_SCR (0x0000)
+#define SMMU_MEMCTRL (0x0004)
+#define SMMU_LP_CTRL (0x0008)
+#define SMMU_PRESS_REMAP (0x000C)
+#define SMMU_INTMASK_NS (0x0010)
+#define SMMU_INTRAW_NS (0x0014)
+#define SMMU_INTSTAT_NS (0x0018)
+#define SMMU_INTCLR_NS (0x001C)
+
+#define SMMU_SMRx_NS (0x0020)
+#define SMMU_RLD_EN0_NS (0x01F0)
+#define SMMU_RLD_EN1_NS (0x01F4)
+#define SMMU_RLD_EN2_NS (0x01F8)
+#define SMMU_CB_SCTRL (0x0200)
+#define SMMU_CB_TTBR0 (0x0204)
+#define SMMU_CB_TTBR1 (0x0208)
+#define SMMU_CB_TTBCR (0x020C)
+#define SMMU_OFFSET_ADDR_NS (0x0210)
+#define SMMU_SCACHEI_ALL (0x0214)
+#define SMMU_SCACHEI_L1 (0x0218)
+#define SMMU_SCACHEI_L2L3 (0x021C)
+#define SMMU_FAMA_CTRL0 (0x0220)
+#define SMMU_FAMA_CTRL1 (0x0224)
+#define SMMU_ADDR_MSB (0x0300)
+#define SMMU_ERR_RDADDR (0x0304)
+#define SMMU_ERR_WRADDR (0x0308)
+#define SMMU_FAULT_ADDR_TCU (0x0310)
+#define SMMU_FAULT_ID_TCU (0x0314)
+
+#define SMMU_FAULT_ADDR_TBUx (0x0320)
+#define SMMU_FAULT_ID_TBUx (0x0324)
+#define SMMU_FAULT_INFOx (0x0328)
+#define SMMU_DBGRPTR_TLB (0x0380)
+#define SMMU_DBGRDATA_TLB (0x0380)
+#define SMMU_DBGRDATA0_CACHE (0x038C)
+#define SMMU_DBGRDATA1_CACHE (0x0390)
+#define SMMU_DBGAXI_CTRL (0x0394)
+#define SMMU_OVA_ADDR (0x0398)
+#define SMMU_OPA_ADDR (0x039C)
+#define SMMU_OVA_CTRL (0x03A0)
+#define SMMU_OPREF_ADDR (0x03A4)
+#define SMMU_OPREF_CTRL (0x03A8)
+#define SMMU_OPREF_CNT (0x03AC)
+
+#define SMMU_SMRx_S (0x0500)
+#define SMMU_RLD_EN0_S (0x06F0)
+#define SMMU_RLD_EN1_S (0x06F4)
+#define SMMU_RLD_EN2_S (0x06F8)
+#define SMMU_INTMAS_S (0x0700)
+#define SMMU_INTRAW_S (0x0704)
+#define SMMU_INTSTAT_S (0x0708)
+#define SMMU_INTCLR_S (0x070C)
+#define SMMU_SCR_S (0x0710)
+#define SMMU_SCB_SCTRL (0x0714)
+#define SMMU_SCB_TTBR (0x0718)
+#define SMMU_SCB_TTBCR (0x071C)
+#define SMMU_OFFSET_ADDR_S (0x0720)
+
+#define SMMU_SID_NUM (64)
+
+typedef struct dss_smmu {
+ u32 smmu_scr;
+ u32 smmu_memctrl;
+ u32 smmu_lp_ctrl;
+ u32 smmu_press_remap;
+ u32 smmu_intmask_ns;
+ u32 smmu_intraw_ns;
+ u32 smmu_intstat_ns;
+ u32 smmu_intclr_ns;
+ u32 smmu_smrx_ns[SMMU_SID_NUM];
+ u32 smmu_rld_en0_ns;
+ u32 smmu_rld_en1_ns;
+ u32 smmu_rld_en2_ns;
+ u32 smmu_cb_sctrl;
+ u32 smmu_cb_ttbr0;
+ u32 smmu_cb_ttbr1;
+ u32 smmu_cb_ttbcr;
+ u32 smmu_offset_addr_ns;
+ u32 smmu_scachei_all;
+ u32 smmu_scachei_l1;
+ u32 smmu_scachei_l2l3;
+ u32 smmu_fama_ctrl0_ns;
+ u32 smmu_fama_ctrl1_ns;
+ u32 smmu_addr_msb;
+ u32 smmu_err_rdaddr;
+ u32 smmu_err_wraddr;
+ u32 smmu_fault_addr_tcu;
+ u32 smmu_fault_id_tcu;
+ u32 smmu_fault_addr_tbux;
+ u32 smmu_fault_id_tbux;
+ u32 smmu_fault_infox;
+ u32 smmu_dbgrptr_tlb;
+ u32 smmu_dbgrdata_tlb;
+ u32 smmu_dbgrptr_cache;
+ u32 smmu_dbgrdata0_cache;
+ u32 smmu_dbgrdata1_cache;
+ u32 smmu_dbgaxi_ctrl;
+ u32 smmu_ova_addr;
+ u32 smmu_opa_addr;
+ u32 smmu_ova_ctrl;
+ u32 smmu_opref_addr;
+ u32 smmu_opref_ctrl;
+ u32 smmu_opref_cnt;
+ u32 smmu_smrx_s[SMMU_SID_NUM];
+ u32 smmu_rld_en0_s;
+ u32 smmu_rld_en1_s;
+ u32 smmu_rld_en2_s;
+ u32 smmu_intmas_s;
+ u32 smmu_intraw_s;
+ u32 smmu_intstat_s;
+ u32 smmu_intclr_s;
+ u32 smmu_scr_s;
+ u32 smmu_scb_sctrl;
+ u32 smmu_scb_ttbr;
+ u32 smmu_scb_ttbcr;
+ u32 smmu_offset_addr_s;
+
+ u8 smmu_smrx_ns_used[DSS_CHN_MAX_DEFINE];
+} dss_smmu_t;
+
+/*******************************************************************************
+ ** RDMA
+ */
+
+#define DMA_OFT_X0 (0x0000)
+#define DMA_OFT_Y0 (0x0004)
+#define DMA_OFT_X1 (0x0008)
+#define DMA_OFT_Y1 (0x000C)
+#define DMA_MASK0 (0x0010)
+#define DMA_MASK1 (0x0014)
+#define DMA_STRETCH_SIZE_VRT (0x0018)
+#define DMA_CTRL (0x001C)
+#define DMA_TILE_SCRAM (0x0020)
+
+#define DMA_PULSE (0x0028)
+#define DMA_CORE_GT (0x002C)
+#define RWCH_CFG0 (0x0030)
+
+#define WDMA_DMA_SW_MASK_EN (0x004C)
+#define WDMA_DMA_START_MASK0 (0x0050)
+#define WDMA_DMA_END_MASK0 (0x0054)
+#define WDMA_DMA_START_MASK1 (0x0058)
+#define WDMA_DMA_END_MASK1 (0x005C)
+
+#define DMA_DATA_ADDR0 (0x0060)
+#define DMA_STRIDE0 (0x0064)
+#define DMA_STRETCH_STRIDE0 (0x0068)
+#define DMA_DATA_NUM0 (0x006C)
+
+#define DMA_TEST0 (0x0070)
+#define DMA_TEST1 (0x0074)
+#define DMA_TEST3 (0x0078)
+#define DMA_TEST4 (0x007C)
+#define DMA_STATUS_Y (0x0080)
+
+#define DMA_DATA_ADDR1 (0x0084)
+#define DMA_STRIDE1 (0x0088)
+#define DMA_STRETCH_STRIDE1 (0x008C)
+#define DMA_DATA_NUM1 (0x0090)
+
+#define DMA_TEST0_U (0x0094)
+#define DMA_TEST1_U (0x0098)
+#define DMA_TEST3_U (0x009C)
+#define DMA_TEST4_U (0x00A0)
+#define DMA_STATUS_U (0x00A4)
+
+#define DMA_DATA_ADDR2 (0x00A8)
+#define DMA_STRIDE2 (0x00AC)
+#define DMA_STRETCH_STRIDE2 (0x00B0)
+#define DMA_DATA_NUM2 (0x00B4)
+
+#define DMA_TEST0_V (0x00B8)
+#define DMA_TEST1_V (0x00BC)
+#define DMA_TEST3_V (0x00C0)
+#define DMA_TEST4_V (0x00C4)
+#define DMA_STATUS_V (0x00C8)
+
+#define CH_RD_SHADOW (0x00D0)
+#define CH_CTL (0x00D4)
+#define CH_SECU_EN (0x00D8)
+#define CH_SW_END_REQ (0x00DC)
+#define CH_CLK_SEL (0x00E0)
+#define CH_CLK_EN (0x00E4)
+
+/*******************************************************************************
+ ** DFC
+ */
+#define DFC_DISP_SIZE (0x0000)
+#define DFC_PIX_IN_NUM (0x0004)
+#define DFC_GLB_ALPHA (0x0008)
+#define DFC_DISP_FMT (0x000C)
+#define DFC_CLIP_CTL_HRZ (0x0010)
+#define DFC_CLIP_CTL_VRZ (0x0014)
+#define DFC_CTL_CLIP_EN (0x0018)
+#define DFC_ICG_MODULE (0x001C)
+#define DFC_DITHER_ENABLE (0x0020)
+#define DFC_PADDING_CTL (0x0024)
+
+typedef struct dss_dfc {
+ u32 disp_size;
+ u32 pix_in_num;
+ u32 disp_fmt;
+ u32 clip_ctl_hrz;
+ u32 clip_ctl_vrz;
+ u32 ctl_clip_en;
+ u32 icg_module;
+ u32 dither_enable;
+ u32 padding_ctl;
+} dss_dfc_t;
+
+/*******************************************************************************
+ ** SCF
+ */
+#define DSS_SCF_H0_Y_COEF_OFFSET (0x0000)
+#define DSS_SCF_Y_COEF_OFFSET (0x2000)
+#define DSS_SCF_UV_COEF_OFFSET (0x2800)
+
+#define SCF_EN_HSCL_STR (0x0000)
+#define SCF_EN_VSCL_STR (0x0004)
+#define SCF_H_V_ORDER (0x0008)
+#define SCF_SCF_CORE_GT (0x000C)
+#define SCF_INPUT_WIDTH_HEIGHT (0x0010)
+#define SCF_OUTPUT_WIDTH_HEIGHT (0x0014)
+#define SCF_COEF_MEM_CTRL (0x0018)
+#define SCF_EN_HSCL (0x001C)
+#define SCF_EN_VSCL (0x0020)
+#define SCF_ACC_HSCL (0x0024)
+#define SCF_ACC_HSCL1 (0x0028)
+#define SCF_INC_HSCL (0x0034)
+#define SCF_ACC_VSCL (0x0038)
+#define SCF_ACC_VSCL1 (0x003C)
+#define SCF_INC_VSCL (0x0048)
+#define SCF_EN_NONLINEAR (0x004C)
+#define SCF_EN_MMP (0x007C)
+#define SCF_DB_H0 (0x0080)
+#define SCF_DB_H1 (0x0084)
+#define SCF_DB_V0 (0x0088)
+#define SCF_DB_V1 (0x008C)
+#define SCF_LB_MEM_CTRL (0x0090)
+#define SCF_RD_SHADOW (0x00F0)
+#define SCF_CLK_SEL (0x00F8)
+#define SCF_CLK_EN (0x00FC)
+
+/* MACROS */
+#define SCF_MIN_INPUT (16)
+#define SCF_MIN_OUTPUT (16)
+
+/* Threshold for SCF Stretch and SCF filter */
+#define RDMA_STRETCH_THRESHOLD (2)
+#define SCF_INC_FACTOR (1 << 18)
+#define SCF_UPSCALE_MAX (60)
+#define SCF_DOWNSCALE_MAX (60)
+#define SCF_EDGE_FACTOR (3)
+#define ARSR2P_INC_FACTOR (65536)
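+/*
+ * Illustrative note (an assumption, not stated in this header): the scaler
+ * increments look like fixed-point ratios with SCF_INC_FACTOR (1 << 18) and
+ * ARSR2P_INC_FACTOR (65536) as the 1:1 value, i.e. roughly
+ * inc = (u32)(((u64)src_size * FACTOR) / dst_size).
+ */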
+
+typedef struct dss_scl {
+ u32 en_hscl_str;
+ u32 en_vscl_str;
+ u32 h_v_order;
+ u32 input_width_height;
+ u32 output_width_height;
+ u32 en_hscl;
+ u32 en_vscl;
+ u32 acc_hscl;
+ u32 inc_hscl;
+ u32 inc_vscl;
+ u32 en_mmp;
+ u32 scf_ch_core_gt;
+ u32 fmt;
+} dss_scl_t;
+
+enum scl_coef_lut_idx {
+ SCL_COEF_NONE_IDX = -1,
+ SCL_COEF_YUV_IDX = 0,
+ SCL_COEF_RGB_IDX = 1,
+ SCL_COEF_IDX_MAX = 2,
+};
+
+/*******************************************************************************
+ ** ARSR2P v0
+ */
+#define ARSR2P_INPUT_WIDTH_HEIGHT (0x000)
+#define ARSR2P_OUTPUT_WIDTH_HEIGHT (0x004)
+#define ARSR2P_IHLEFT (0x008)
+#define ARSR2P_IHRIGHT (0x00C)
+#define ARSR2P_IVTOP (0x010)
+#define ARSR2P_IVBOTTOM (0x014)
+#define ARSR2P_IHINC (0x018)
+#define ARSR2P_IVINC (0x01C)
+#define ARSR2P_UV_OFFSET (0x020)
+#define ARSR2P_MODE (0x024)
+#define ARSR2P_SKIN_THRES_Y (0x028)
+#define ARSR2P_SKIN_THRES_U (0x02C)
+#define ARSR2P_SKIN_THRES_V (0x030)
+#define ARSR2P_SKIN_CFG0 (0x034)
+#define ARSR2P_SKIN_CFG1 (0x038)
+#define ARSR2P_SKIN_CFG2 (0x03C)
+#define ARSR2P_SHOOT_CFG1 (0x040)
+#define ARSR2P_SHOOT_CFG2 (0x044)
+#define ARSR2P_SHARP_CFG1 (0x048)
+#define ARSR2P_SHARP_CFG2 (0x04C)
+#define ARSR2P_SHARP_CFG3 (0x050)
+#define ARSR2P_SHARP_CFG4 (0x054)
+#define ARSR2P_SHARP_CFG5 (0x058)
+#define ARSR2P_SHARP_CFG6 (0x05C)
+#define ARSR2P_SHARP_CFG7 (0x060)
+#define ARSR2P_SHARP_CFG8 (0x064)
+#define ARSR2P_SHARP_CFG9 (0x068)
+#define ARSR2P_TEXTURW_ANALYSTS (0x06C)
+#define ARSR2P_INTPLSHOOTCTRL (0x070)
+#define ARSR2P_DEBUG0 (0x074)
+#define ARSR2P_DEBUG1 (0x078)
+#define ARSR2P_DEBUG2 (0x07C)
+#define ARSR2P_DEBUG3 (0x080)
+#define ARSR2P_LB_MEM_CTRL (0x084)
+#define ARSR2P_IHLEFT1 (0x088)
+#define ARSR2P_IHRIGHT1 (0x090)
+#define ARSR2P_IVBOTTOM1 (0x094)
+
+#define ARSR2P_LUT_COEFY_V_OFFSET (0x0000)
+#define ARSR2P_LUT_COEFY_H_OFFSET (0x0100)
+#define ARSR2P_LUT_COEFA_V_OFFSET (0x0300)
+#define ARSR2P_LUT_COEFA_H_OFFSET (0x0400)
+#define ARSR2P_LUT_COEFUV_V_OFFSET (0x0600)
+#define ARSR2P_LUT_COEFUV_H_OFFSET (0x0700)
+
+typedef struct dss_arsr2p_effect {
+ u32 skin_thres_y;
+ u32 skin_thres_u;
+ u32 skin_thres_v;
+ u32 skin_cfg0;
+ u32 skin_cfg1;
+ u32 skin_cfg2;
+ u32 shoot_cfg1;
+ u32 shoot_cfg2;
+ u32 sharp_cfg1;
+ u32 sharp_cfg2;
+ u32 sharp_cfg3;
+ u32 sharp_cfg4;
+ u32 sharp_cfg5;
+ u32 sharp_cfg6;
+ u32 sharp_cfg7;
+ u32 sharp_cfg8;
+ u32 sharp_cfg9;
+ u32 texturw_analysts;
+ u32 intplshootctrl;
+} dss_arsr2p_effect_t;
+
+typedef struct dss_arsr2p {
+ u32 arsr_input_width_height;
+ u32 arsr_output_width_height;
+ u32 ihleft;
+ u32 ihright;
+ u32 ivtop;
+ u32 ivbottom;
+ u32 ihinc;
+ u32 ivinc;
+ u32 offset;
+ u32 mode;
+ dss_arsr2p_effect_t arsr2p_effect;
+ u32 ihleft1;
+ u32 ihright1;
+ u32 ivbottom1;
+} dss_arsr2p_t;
+
+/*******************************************************************************
+ ** POST_CLIP
+ */
+#define POST_CLIP_DISP_SIZE (0x0000)
+#define POST_CLIP_CTL_HRZ (0x0010)
+#define POST_CLIP_CTL_VRZ (0x0014)
+#define POST_CLIP_EN (0x0018)
+
+typedef struct dss_post_clip {
+ u32 disp_size;
+ u32 clip_ctl_hrz;
+ u32 clip_ctl_vrz;
+ u32 ctl_clip_en;
+} dss_post_clip_t;
+
+/*******************************************************************************
+ ** PCSC
+ */
+#define PCSC_IDC0 (0x0000)
+#define PCSC_IDC2 (0x0004)
+#define PCSC_ODC0 (0x0008)
+#define PCSC_ODC2 (0x000C)
+#define PCSC_P0 (0x0010)
+#define PCSC_P1 (0x0014)
+#define PCSC_P2 (0x0018)
+#define PCSC_P3 (0x001C)
+#define PCSC_P4 (0x0020)
+#define PCSC_ICG_MODULE (0x0024)
+#define PCSC_MPREC (0x0028)
+
+typedef struct dss_pcsc {
+ u32 pcsc_idc0;
+} dss_pcsc_t;
+
+/*******************************************************************************
+ ** CSC
+ */
+#define CSC_IDC0 (0x0000)
+#define CSC_IDC2 (0x0004)
+#define CSC_ODC0 (0x0008)
+#define CSC_ODC2 (0x000C)
+#define CSC_P0 (0x0010)
+#define CSC_P1 (0x0014)
+#define CSC_P2 (0x0018)
+#define CSC_P3 (0x001C)
+#define CSC_P4 (0x0020)
+#define CSC_ICG_MODULE (0x0024)
+#define CSC_MPREC (0x0028)
+
+typedef struct dss_csc {
+ u32 idc0;
+ u32 idc2;
+ u32 odc0;
+ u32 odc2;
+ u32 p0;
+ u32 p1;
+ u32 p2;
+ u32 p3;
+ u32 p4;
+ u32 icg_module;
+ u32 mprec;
+} dss_csc_t;
+
+/*******************************************************************************
+ ** CHANNEL DEBUG
+ */
+#define CH_DEBUG_SEL (0x600)
+
+/*******************************************************************************
+ ** VPP
+ */
+#define VPP_CTRL (0x700)
+#define VPP_MEM_CTRL (0x704)
+
+/*******************************************************************************
+ ** DMA BUF
+ */
+#define DMA_BUF_CTRL (0x800)
+#define DMA_BUF_SIZE (0x850)
+#define DMA_BUF_MEM_CTRL (0x854)
+#define DMA_BUF_DBG0 (0x0838)
+#define DMA_BUF_DBG1 (0x083c)
+
+#define AFBCD_HREG_HDR_PTR_LO (0x900)
+#define AFBCD_HREG_PIC_WIDTH (0x904)
+#define AFBCD_HREG_PIC_HEIGHT (0x90C)
+#define AFBCD_HREG_FORMAT (0x910)
+#define AFBCD_CTL (0x914)
+#define AFBCD_STR (0x918)
+#define AFBCD_LINE_CROP (0x91C)
+#define AFBCD_INPUT_HEADER_STRIDE (0x920)
+#define AFBCD_PAYLOAD_STRIDE (0x924)
+#define AFBCD_MM_BASE_0 (0x928)
+#define AFBCD_AFBCD_PAYLOAD_POINTER (0x930)
+#define AFBCD_HEIGHT_BF_STR (0x934)
+#define AFBCD_OS_CFG (0x938)
+#define AFBCD_MEM_CTRL (0x93C)
+#define AFBCD_SCRAMBLE_MODE (0x940)
+#define AFBCD_HEADER_POINTER_OFFSET (0x944)
+#define AFBCD_MONITOR_REG1_OFFSET (0x948)
+#define AFBCD_MONITOR_REG2_OFFSET (0x94C)
+#define AFBCD_MONITOR_REG3_OFFSET (0x950)
+#define AFBCD_DEBUG_REG0_OFFSET (0x954)
+
+#define AFBCE_HREG_PIC_BLKS (0x900)
+#define AFBCE_HREG_FORMAT (0x904)
+#define AFBCE_HREG_HDR_PTR_LO (0x908)
+#define AFBCE_HREG_PLD_PTR_LO (0x90C)
+#define AFBCE_PICTURE_SIZE (0x910)
+#define AFBCE_CTL (0x914)
+#define AFBCE_HEADER_SRTIDE (0x918)
+#define AFBCE_PAYLOAD_STRIDE (0x91C)
+#define AFBCE_ENC_OS_CFG (0x920)
+#define AFBCE_MEM_CTRL (0x924)
+#define AFBCE_QOS_CFG (0x928)
+#define AFBCE_THRESHOLD (0x92C)
+#define AFBCE_SCRAMBLE_MODE (0x930)
+#define AFBCE_HEADER_POINTER_OFFSET (0x934)
+
+#define ROT_FIRST_LNS (0x530)
+#define ROT_STATE (0x534)
+#define ROT_MEM_CTRL (0x538)
+#define ROT_SIZE (0x53C)
+#define ROT_CPU_CTL0 (0x540)
+#define ROT_CPU_START0 (0x544)
+#define ROT_CPU_ADDR0 (0x548)
+#define ROT_CPU_RDATA0 (0x54C)
+#define ROT_CPU_RDATA1 (0x550)
+#define ROT_CPU_WDATA0 (0x554)
+#define ROT_CPU_WDATA1 (0x558)
+#define ROT_CPU_CTL1 (0x55C)
+#define ROT_CPU_START1 (0x560)
+#define ROT_CPU_ADDR1 (0x564)
+#define ROT_CPU_RDATA2 (0x568)
+#define ROT_CPU_RDATA3 (0x56C)
+#define ROT_CPU_WDATA2 (0x570)
+#define ROT_CPU_WDATA3 (0x574)
+
+#define CH_REG_DEFAULT (0x0A00)
+
+/* MACROS */
+#define MIN_INTERLEAVE (7)
+#define MAX_TILE_SURPORT_NUM (6)
+
+/* DMA alignment limit: 128-bit aligned */
+#define DMA_ALIGN_BYTES (128 / BITS_PER_BYTE)
+#define DMA_ADDR_ALIGN (128 / BITS_PER_BYTE)
+#define DMA_STRIDE_ALIGN (128 / BITS_PER_BYTE)
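+/*
+ * Illustrative note: with BITS_PER_BYTE == 8 the three values above all
+ * evaluate to 16 bytes, so a line stride would typically be rounded up with
+ * ALIGN_UP(width * bytes_per_pixel, DMA_STRIDE_ALIGN) (ALIGN_UP is defined at
+ * the end of this header) before being written to DMA_STRIDE0/1/2.
+ */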
+
+#define TILE_DMA_ADDR_ALIGN (256 * 1024)
+
+#define DMA_IN_WIDTH_MAX (2048)
+#define DMA_IN_HEIGHT_MAX (8192)
+
+#define AFBC_PIC_WIDTH_MIN (16)
+#define AFBC_PIC_WIDTH_MAX (8192)
+#define AFBC_PIC_HEIGHT_MIN (16)
+#define AFBC_PIC_HEIGHT_MAX (4096)
+
+#define AFBCD_TOP_CROP_MAX (15)
+#define AFBCD_BOTTOM_CROP_MAX (15)
+
+#define AFBC_HEADER_STRIDE_BLOCK (16)
+
+#define AFBC_PAYLOAD_STRIDE_BLOCK (1024)
+
+#define AFBC_SUPER_GRAPH_HEADER_ADDR_ALIGN (128)
+#define AFBC_HEADER_ADDR_ALIGN (64)
+#define AFBC_HEADER_STRIDE_ALIGN (64)
+
+#define AFBC_PAYLOAD_ADDR_ALIGN_32 (1024)
+#define AFBC_PAYLOAD_STRIDE_ALIGN_32 (1024)
+#define AFBC_PAYLOAD_ADDR_ALIGN_16 (512)
+#define AFBC_PAYLOAD_STRIDE_ALIGN_16 (512)
+
+#define AFBC_BLOCK_ALIGN (16)
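+/*
+ * Illustrative note (an assumption based on the macro names above, not a
+ * statement from this driver): AFBC surfaces are divided into
+ * AFBC_BLOCK_ALIGN x AFBC_BLOCK_ALIGN superblocks; the header plane takes
+ * AFBC_HEADER_STRIDE_BLOCK bytes per superblock and the payload plane up to
+ * AFBC_PAYLOAD_STRIDE_BLOCK bytes, with addresses and strides rounded up to
+ * the *_ADDR_ALIGN / *_STRIDE_ALIGN values above.
+ */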
+
+#define AFBCE_IN_WIDTH_MAX (512)
+#define WROT_IN_WIDTH_MAX (512)
+
+#define MMBUF_BASE (0x40)
+#define MMBUF_LINE_NUM (8)
+#define MMBUF_ADDR_ALIGN (64)
+
+enum DSS_AFBC_HALF_BLOCK_MODE {
+ AFBC_HALF_BLOCK_UPPER_LOWER_ALL = 0,
+ AFBC_HALF_BLOCK_LOWER_UPPER_ALL,
+ AFBC_HALF_BLOCK_UPPER_ONLY,
+ AFBC_HALF_BLOCK_LOWER_ONLY,
+};
+
+typedef struct dss_rdma {
+ u32 oft_x0;
+ u32 oft_y0;
+ u32 oft_x1;
+ u32 oft_y1;
+ u32 mask0;
+ u32 mask1;
+ u32 stretch_size_vrt;
+ u32 ctrl;
+ u32 tile_scram;
+
+ u32 data_addr0;
+ u32 stride0;
+ u32 stretch_stride0;
+ u32 data_num0;
+
+ u32 data_addr1;
+ u32 stride1;
+ u32 stretch_stride1;
+ u32 data_num1;
+
+ u32 data_addr2;
+ u32 stride2;
+ u32 stretch_stride2;
+ u32 data_num2;
+
+ u32 ch_rd_shadow;
+ u32 ch_ctl;
+
+ u32 dma_buf_ctrl;
+
+ u32 vpp_ctrl;
+ u32 vpp_mem_ctrl;
+
+ u32 afbcd_hreg_hdr_ptr_lo;
+ u32 afbcd_hreg_pic_width;
+ u32 afbcd_hreg_pic_height;
+ u32 afbcd_hreg_format;
+ u32 afbcd_ctl;
+ u32 afbcd_str;
+ u32 afbcd_line_crop;
+ u32 afbcd_input_header_stride;
+ u32 afbcd_payload_stride;
+ u32 afbcd_mm_base_0;
+
+ u32 afbcd_afbcd_payload_pointer;
+ u32 afbcd_height_bf_str;
+ u32 afbcd_os_cfg;
+ u32 afbcd_mem_ctrl;
+ u32 afbcd_scramble_mode;
+ u32 afbcd_header_pointer_offset;
+
+ u8 vpp_used;
+ u8 afbc_used;
+} dss_rdma_t;
+
+typedef struct dss_wdma {
+ u32 oft_x0;
+ u32 oft_y0;
+ u32 oft_x1;
+ u32 oft_y1;
+
+ u32 mask0;
+ u32 mask1;
+ u32 stretch_size_vrt;
+ u32 ctrl;
+ u32 tile_scram;
+
+ u32 sw_mask_en;
+ u32 start_mask0;
+ u32 end_mask0;
+ u32 start_mask1;
+ u32 end_mask1;
+
+ u32 data_addr;
+ u32 stride0;
+ u32 data1_addr;
+ u32 stride1;
+
+ u32 stretch_stride;
+ u32 data_num;
+
+ u32 ch_rd_shadow;
+ u32 ch_ctl;
+ u32 ch_secu_en;
+ u32 ch_sw_end_req;
+
+ u32 dma_buf_ctrl;
+ u32 dma_buf_size;
+
+ u32 rot_size;
+
+ u32 afbce_hreg_pic_blks;
+ u32 afbce_hreg_format;
+ u32 afbce_hreg_hdr_ptr_lo;
+ u32 afbce_hreg_pld_ptr_lo;
+ u32 afbce_picture_size;
+ u32 afbce_ctl;
+ u32 afbce_header_srtide;
+ u32 afbce_payload_stride;
+ u32 afbce_enc_os_cfg;
+ u32 afbce_mem_ctrl;
+ u32 afbce_qos_cfg;
+ u32 afbce_threshold;
+ u32 afbce_scramble_mode;
+ u32 afbce_header_pointer_offset;
+
+ u8 afbc_used;
+ u8 rot_used;
+} dss_wdma_t;
+
+/*******************************************************************************
+ ** MCTL MUTEX0/1/2/3/4/5
+ */
+#define MCTL_CTL_EN (0x0000)
+#define MCTL_CTL_MUTEX (0x0004)
+#define MCTL_CTL_MUTEX_STATUS (0x0008)
+#define MCTL_CTL_MUTEX_ITF (0x000C)
+#define MCTL_CTL_MUTEX_DBUF (0x0010)
+#define MCTL_CTL_MUTEX_SCF (0x0014)
+#define MCTL_CTL_MUTEX_OV (0x0018)
+#define MCTL_CTL_MUTEX_WCH0 (0x0020)
+#define MCTL_CTL_MUTEX_WCH1 (0x0024)
+#define MCTL_CTL_MUTEX_WCH2 (0x0028)
+#define MCTL_CTL_MUTEX_RCH8 (0x002C)
+#define MCTL_CTL_MUTEX_RCH0 (0x0030)
+#define MCTL_CTL_MUTEX_RCH1 (0x0034)
+#define MCTL_CTL_MUTEX_RCH2 (0x0038)
+#define MCTL_CTL_MUTEX_RCH3 (0x003C)
+#define MCTL_CTL_MUTEX_RCH4 (0x0040)
+#define MCTL_CTL_MUTEX_RCH5 (0x0044)
+#define MCTL_CTL_MUTEX_RCH6 (0x0048)
+#define MCTL_CTL_MUTEX_RCH7 (0x004C)
+#define MCTL_CTL_TOP (0x0050)
+#define MCTL_CTL_FLUSH_STATUS (0x0054)
+#define MCTL_CTL_CLEAR (0x0058)
+#define MCTL_CTL_CACK_TOUT (0x0060)
+#define MCTL_CTL_MUTEX_TOUT (0x0064)
+#define MCTL_CTL_STATUS (0x0068)
+#define MCTL_CTL_INTEN (0x006C)
+#define MCTL_CTL_SW_ST (0x0070)
+#define MCTL_CTL_ST_SEL (0x0074)
+#define MCTL_CTL_END_SEL (0x0078)
+#define MCTL_CTL_CLK_SEL (0x0080)
+#define MCTL_CTL_CLK_EN (0x0084)
+#define MCTL_CTL_DBG (0x00E0)
+
+/*******************************************************************************
+ ** MCTL SYS
+ */
+#define MCTL_CTL_SECU_CFG (0x0000)
+#define MCTL_PAY_SECU_FLUSH_EN (0x0018)
+#define MCTL_CTL_SECU_GATE0 (0x0080)
+#define MCTL_CTL_SECU_GATE1 (0x0084)
+#define MCTL_CTL_SECU_GATE2 (0x0088)
+#define MCTL_DSI0_SECU_CFG_EN (0x00A0)
+#define MCTL_DSI1_SECU_CFG_EN (0x00A4)
+
+#define MCTL_RCH0_FLUSH_EN (0x0100)
+#define MCTL_RCH1_FLUSH_EN (0x0104)
+#define MCTL_RCH2_FLUSH_EN (0x0108)
+#define MCTL_RCH3_FLUSH_EN (0x010C)
+#define MCTL_RCH4_FLUSH_EN (0x0110)
+#define MCTL_RCH5_FLUSH_EN (0x0114)
+#define MCTL_RCH6_FLUSH_EN (0x0118)
+#define MCTL_RCH7_FLUSH_EN (0x011C)
+#define MCTL_WCH0_FLUSH_EN (0x0120)
+#define MCTL_WCH1_FLUSH_EN (0x0124)
+#define MCTL_OV0_FLUSH_EN (0x0128)
+#define MCTL_OV1_FLUSH_EN (0x012C)
+#define MCTL_OV2_FLUSH_EN (0x0130)
+#define MCTL_OV3_FLUSH_EN (0x0134)
+#define MCTL_RCH8_FLUSH_EN (0x0138)
+#define MCTL_WCH2_FLUSH_EN (0x013C)
+
+#define MCTL_RCH0_OV_OEN (0x0160)
+#define MCTL_RCH1_OV_OEN (0x0164)
+#define MCTL_RCH2_OV_OEN (0x0168)
+#define MCTL_RCH3_OV_OEN (0x016C)
+#define MCTL_RCH4_OV_OEN (0x0170)
+#define MCTL_RCH5_OV_OEN (0x0174)
+#define MCTL_RCH6_OV_OEN (0x0178)
+#define MCTL_RCH7_OV_OEN (0x017C)
+
+#define MCTL_RCH_OV0_SEL (0x0180)
+#define MCTL_RCH_OV1_SEL (0x0184)
+#define MCTL_RCH_OV2_SEL (0x0188)
+#define MCTL_RCH_OV3_SEL (0x018C)
+
+#define MCTL_WCH0_OV_IEN (0x01A0)
+#define MCTL_WCH1_OV_IEN (0x01A4)
+
+#define MCTL_WCH_OV2_SEL (0x01A8)
+#define MCTL_WCH_OV3_SEL (0x01AC)
+
+#define MCTL_WB_ENC_SEL (0x01B0)
+#define MCTL_DSI_MUX_SEL (0x01B4)
+
+#define MCTL_RCH0_STARTY (0x01C0)
+#define MCTL_RCH1_STARTY (0x01C4)
+#define MCTL_RCH2_STARTY (0x01C8)
+#define MCTL_RCH3_STARTY (0x01CC)
+#define MCTL_RCH4_STARTY (0x01D0)
+#define MCTL_RCH5_STARTY (0x01D4)
+#define MCTL_RCH6_STARTY (0x01D8)
+#define MCTL_RCH7_STARTY (0x01DC)
+
+#define MCTL_MCTL_CLK_SEL (0x01F0)
+#define MCTL_MCTL_CLK_EN (0x01F4)
+#define MCTL_MOD_CLK_SEL (0x01F8)
+#define MCTL_MOD_CLK_EN (0x01FC)
+
+#define MCTL_MOD0_DBG (0x0200)
+#define MCTL_MOD1_DBG (0x0204)
+#define MCTL_MOD2_DBG (0x0208)
+#define MCTL_MOD3_DBG (0x020C)
+#define MCTL_MOD4_DBG (0x0210)
+#define MCTL_MOD5_DBG (0x0214)
+#define MCTL_MOD6_DBG (0x0218)
+#define MCTL_MOD7_DBG (0x021C)
+#define MCTL_MOD8_DBG (0x0220)
+#define MCTL_MOD9_DBG (0x0224)
+#define MCTL_MOD10_DBG (0x0228)
+#define MCTL_MOD11_DBG (0x022C)
+#define MCTL_MOD12_DBG (0x0230)
+#define MCTL_MOD13_DBG (0x0234)
+#define MCTL_MOD14_DBG (0x0238)
+#define MCTL_MOD15_DBG (0x023C)
+#define MCTL_MOD16_DBG (0x0240)
+#define MCTL_MOD17_DBG (0x0244)
+#define MCTL_MOD18_DBG (0x0248)
+#define MCTL_MOD19_DBG (0x024C)
+#define MCTL_MOD20_DBG (0x0250)
+#define MCTL_MOD0_STATUS (0x0280)
+#define MCTL_MOD1_STATUS (0x0284)
+#define MCTL_MOD2_STATUS (0x0288)
+#define MCTL_MOD3_STATUS (0x028C)
+#define MCTL_MOD4_STATUS (0x0290)
+#define MCTL_MOD5_STATUS (0x0294)
+#define MCTL_MOD6_STATUS (0x0298)
+#define MCTL_MOD7_STATUS (0x029C)
+#define MCTL_MOD8_STATUS (0x02A0)
+#define MCTL_MOD9_STATUS (0x02A4)
+#define MCTL_MOD10_STATUS (0x02A8)
+#define MCTL_MOD11_STATUS (0x02AC)
+#define MCTL_MOD12_STATUS (0x02B0)
+#define MCTL_MOD13_STATUS (0x02B4)
+#define MCTL_MOD14_STATUS (0x02B8)
+#define MCTL_MOD15_STATUS (0x02BC)
+#define MCTL_MOD16_STATUS (0x02C0)
+#define MCTL_MOD17_STATUS (0x02C4)
+#define MCTL_MOD18_STATUS (0x02C8)
+#define MCTL_MOD19_STATUS (0x02CC)
+#define MCTL_MOD20_STATUS (0x02D0)
+#define MCTL_SW_DBG (0x0300)
+#define MCTL_SW0_STATUS0 (0x0304)
+#define MCTL_SW0_STATUS1 (0x0308)
+#define MCTL_SW0_STATUS2 (0x030C)
+#define MCTL_SW0_STATUS3 (0x0310)
+#define MCTL_SW0_STATUS4 (0x0314)
+#define MCTL_SW0_STATUS5 (0x0318)
+#define MCTL_SW0_STATUS6 (0x031C)
+#define MCTL_SW0_STATUS7 (0x0320)
+#define MCTL_SW1_STATUS (0x0324)
+
+#define MCTL_MOD_DBG_CH_NUM (10)
+#define MCTL_MOD_DBG_OV_NUM (4)
+#define MCTL_MOD_DBG_DBUF_NUM (2)
+#define MCTL_MOD_DBG_SCF_NUM (1)
+#define MCTL_MOD_DBG_ITF_NUM (2)
+#define MCTL_MOD_DBG_ADD_CH_NUM (2)
+
+enum dss_mctl_idx {
+ DSS_MCTL0 = 0,
+ DSS_MCTL1,
+ DSS_MCTL2,
+ DSS_MCTL3,
+ DSS_MCTL4,
+ DSS_MCTL5,
+ DSS_MCTL_IDX_MAX,
+};
+
+typedef struct dss_mctl {
+ u32 ctl_mutex_itf;
+ u32 ctl_mutex_dbuf;
+ u32 ctl_mutex_scf;
+ u32 ctl_mutex_ov;
+} dss_mctl_t;
+
+typedef struct dss_mctl_ch_base {
+ char __iomem *chn_mutex_base;
+ char __iomem *chn_flush_en_base;
+ char __iomem *chn_ov_en_base;
+ char __iomem *chn_starty_base;
+ char __iomem *chn_mod_dbg_base;
+} dss_mctl_ch_base_t;
+
+typedef struct dss_mctl_ch {
+ u32 chn_mutex;
+ u32 chn_flush_en;
+ u32 chn_ov_oen;
+ u32 chn_starty;
+ u32 chn_mod_dbg;
+} dss_mctl_ch_t;
+
+typedef struct dss_mctl_sys {
+ u32 ov_flush_en[DSS_OVL_IDX_MAX];
+ u32 chn_ov_sel[DSS_OVL_IDX_MAX];
+ u32 wchn_ov_sel[DSS_WCH_MAX];
+ u8 ov_flush_en_used[DSS_OVL_IDX_MAX];
+ u8 chn_ov_sel_used[DSS_OVL_IDX_MAX];
+ u8 wch_ov_sel_used[DSS_WCH_MAX];
+} dss_mctl_sys_t;
+
+/*******************************************************************************
+ ** OVL
+ */
+#define OVL_SIZE (0x0000)
+#define OVL_BG_COLOR (0x4)
+#define OVL_DST_STARTPOS (0x8)
+#define OVL_DST_ENDPOS (0xC)
+#define OVL_GCFG (0x10)
+#define OVL_LAYER0_POS (0x14)
+#define OVL_LAYER0_SIZE (0x18)
+#define OVL_LAYER0_SRCLOKEY (0x1C)
+#define OVL_LAYER0_SRCHIKEY (0x20)
+#define OVL_LAYER0_DSTLOKEY (0x24)
+#define OVL_LAYER0_DSTHIKEY (0x28)
+#define OVL_LAYER0_PATTERN (0x2C)
+#define OVL_LAYER0_ALPHA (0x30)
+#define OVL_LAYER0_CFG (0x34)
+#define OVL_LAYER0_INFO_ALPHA (0x40)
+#define OVL_LAYER0_INFO_SRCCOLOR (0x44)
+#define OVL_LAYER1_POS (0x50)
+#define OVL_LAYER1_SIZE (0x54)
+#define OVL_LAYER1_SRCLOKEY (0x58)
+#define OVL_LAYER1_SRCHIKEY (0x5C)
+#define OVL_LAYER1_DSTLOKEY (0x60)
+#define OVL_LAYER1_DSTHIKEY (0x64)
+#define OVL_LAYER1_PATTERN (0x68)
+#define OVL_LAYER1_ALPHA (0x6C)
+#define OVL_LAYER1_CFG (0x70)
+#define OVL_LAYER1_INFO_ALPHA (0x7C)
+#define OVL_LAYER1_INFO_SRCCOLOR (0x80)
+#define OVL_LAYER2_POS (0x8C)
+#define OVL_LAYER2_SIZE (0x90)
+#define OVL_LAYER2_SRCLOKEY (0x94)
+#define OVL_LAYER2_SRCHIKEY (0x98)
+#define OVL_LAYER2_DSTLOKEY (0x9C)
+#define OVL_LAYER2_DSTHIKEY (0xA0)
+#define OVL_LAYER2_PATTERN (0xA4)
+#define OVL_LAYER2_ALPHA (0xA8)
+#define OVL_LAYER2_CFG (0xAC)
+#define OVL_LAYER2_INFO_ALPHA (0xB8)
+#define OVL_LAYER2_INFO_SRCCOLOR (0xBC)
+#define OVL_LAYER3_POS (0xC8)
+#define OVL_LAYER3_SIZE (0xCC)
+#define OVL_LAYER3_SRCLOKEY (0xD0)
+#define OVL_LAYER3_SRCHIKEY (0xD4)
+#define OVL_LAYER3_DSTLOKEY (0xD8)
+#define OVL_LAYER3_DSTHIKEY (0xDC)
+#define OVL_LAYER3_PATTERN (0xE0)
+#define OVL_LAYER3_ALPHA (0xE4)
+#define OVL_LAYER3_CFG (0xE8)
+#define OVL_LAYER3_INFO_ALPHA (0xF4)
+#define OVL_LAYER3_INFO_SRCCOLOR (0xF8)
+#define OVL_LAYER4_POS (0x104)
+#define OVL_LAYER4_SIZE (0x108)
+#define OVL_LAYER4_SRCLOKEY (0x10C)
+#define OVL_LAYER4_SRCHIKEY (0x110)
+#define OVL_LAYER4_DSTLOKEY (0x114)
+#define OVL_LAYER4_DSTHIKEY (0x118)
+#define OVL_LAYER4_PATTERN (0x11C)
+#define OVL_LAYER4_ALPHA (0x120)
+#define OVL_LAYER4_CFG (0x124)
+#define OVL_LAYER4_INFO_ALPHA (0x130)
+#define OVL_LAYER4_INFO_SRCCOLOR (0x134)
+#define OVL_LAYER5_POS (0x140)
+#define OVL_LAYER5_SIZE (0x144)
+#define OVL_LAYER5_SRCLOKEY (0x148)
+#define OVL_LAYER5_SRCHIKEY (0x14C)
+#define OVL_LAYER5_DSTLOKEY (0x150)
+#define OVL_LAYER5_DSTHIKEY (0x154)
+#define OVL_LAYER5_PATTERN (0x158)
+#define OVL_LAYER5_ALPHA (0x15C)
+#define OVL_LAYER5_CFG (0x160)
+#define OVL_LAYER5_INFO_ALPHA (0x16C)
+#define OVL_LAYER5_INFO_SRCCOLOR (0x170)
+#define OVL_LAYER6_POS (0x14)
+#define OVL_LAYER6_SIZE (0x18)
+#define OVL_LAYER6_SRCLOKEY (0x1C)
+#define OVL_LAYER6_SRCHIKEY (0x20)
+#define OVL_LAYER6_DSTLOKEY (0x24)
+#define OVL_LAYER6_DSTHIKEY (0x28)
+#define OVL_LAYER6_PATTERN (0x2C)
+#define OVL_LAYER6_ALPHA (0x30)
+#define OVL_LAYER6_CFG (0x34)
+#define OVL_LAYER6_INFO_ALPHA (0x40)
+#define OVL_LAYER6_INFO_SRCCOLOR (0x44)
+#define OVL_LAYER7_POS (0x50)
+#define OVL_LAYER7_SIZE (0x54)
+#define OVL_LAYER7_SRCLOKEY (0x58)
+#define OVL_LAYER7_SRCHIKEY (0x5C)
+#define OVL_LAYER7_DSTLOKEY (0x60)
+#define OVL_LAYER7_DSTHIKEY (0x64)
+#define OVL_LAYER7_PATTERN (0x68)
+#define OVL_LAYER7_ALPHA (0x6C)
+#define OVL_LAYER7_CFG (0x70)
+#define OVL_LAYER7_INFO_ALPHA (0x7C)
+#define OVL_LAYER7_INFO_SRCCOLOR (0x80)
+#define OVL_LAYER0_ST_INFO (0x48)
+#define OVL_LAYER1_ST_INFO (0x84)
+#define OVL_LAYER2_ST_INFO (0xC0)
+#define OVL_LAYER3_ST_INFO (0xFC)
+#define OVL_LAYER4_ST_INFO (0x138)
+#define OVL_LAYER5_ST_INFO (0x174)
+#define OVL_LAYER6_ST_INFO (0x48)
+#define OVL_LAYER7_ST_INFO (0x84)
+#define OVL_LAYER0_IST_INFO (0x4C)
+#define OVL_LAYER1_IST_INFO (0x88)
+#define OVL_LAYER2_IST_INFO (0xC4)
+#define OVL_LAYER3_IST_INFO (0x100)
+#define OVL_LAYER4_IST_INFO (0x13C)
+#define OVL_LAYER5_IST_INFO (0x178)
+#define OVL_LAYER6_IST_INFO (0x4C)
+#define OVL_LAYER7_IST_INFO (0x88)
+#define OVL_LAYER0_PSPOS (0x38)
+#define OVL_LAYER0_PEPOS (0x3C)
+#define OVL_LAYER1_PSPOS (0x74)
+#define OVL_LAYER1_PEPOS (0x78)
+#define OVL_LAYER2_PSPOS (0xB0)
+#define OVL_LAYER2_PEPOS (0xB4)
+#define OVL_LAYER3_PSPOS (0xEC)
+#define OVL_LAYER3_PEPOS (0xF0)
+#define OVL_LAYER4_PSPOS (0x128)
+#define OVL_LAYER4_PEPOS (0x12C)
+#define OVL_LAYER5_PSPOS (0x164)
+#define OVL_LAYER5_PEPOS (0x168)
+#define OVL_LAYER6_PSPOS (0x38)
+#define OVL_LAYER6_PEPOS (0x3C)
+#define OVL_LAYER7_PSPOS (0x74)
+#define OVL_LAYER7_PEPOS (0x78)
+
+#define OVL6_BASE_ST_INFO (0x17C)
+#define OVL6_BASE_IST_INFO (0x180)
+#define OVL6_GATE_CTRL (0x184)
+#define OVL6_RD_SHADOW_SEL (0x188)
+#define OVL6_OV_CLK_SEL (0x18C)
+#define OVL6_OV_CLK_EN (0x190)
+#define OVL6_BLOCK_SIZE (0x1A0)
+#define OVL6_BLOCK_DBG (0x1A4)
+#define OVL6_REG_DEFAULT (0x1A8)
+
+#define OVL2_BASE_ST_INFO (0x8C)
+#define OVL2_BASE_IST_INFO (0x90)
+#define OVL2_GATE_CTRL (0x94)
+#define OVL2_OV_RD_SHADOW_SEL (0x98)
+#define OVL2_OV_CLK_SEL (0x9C)
+#define OVL2_OV_CLK_EN (0xA0)
+#define OVL2_BLOCK_SIZE (0xB0)
+#define OVL2_BLOCK_DBG (0xB4)
+#define OVL2_REG_DEFAULT (0xB8)
+
+/* LAYER0_CFG */
+#define BIT_OVL_LAYER_SRC_CFG BIT(8)
+#define BIT_OVL_LAYER_ENABLE BIT(0)
+
+/* LAYER0_INFO_ALPHA */
+#define BIT_OVL_LAYER_SRCALPHA_FLAG BIT(3)
+#define BIT_OVL_LAYER_DSTALPHA_FLAG BIT(2)
+
+/* LAYER0_INFO_SRCCOLOR */
+#define BIT_OVL_LAYER_SRCCOLOR_FLAG BIT(0)
+
+#define OVL_6LAYER_NUM (6)
+#define OVL_2LAYER_NUM (2)
+
+typedef struct dss_ovl_layer {
+ u32 layer_pos;
+ u32 layer_size;
+ u32 layer_pattern;
+ u32 layer_alpha;
+ u32 layer_cfg;
+
+} dss_ovl_layer_t;
+
+typedef struct dss_ovl_layer_pos {
+ u32 layer_pspos;
+ u32 layer_pepos;
+
+} dss_ovl_layer_pos_t;
+
+typedef struct dss_ovl {
+ u32 ovl_size;
+ u32 ovl_bg_color;
+ u32 ovl_dst_startpos;
+ u32 ovl_dst_endpos;
+ u32 ovl_gcfg;
+ u32 ovl_block_size;
+ dss_ovl_layer_t ovl_layer[OVL_6LAYER_NUM];
+ dss_ovl_layer_pos_t ovl_layer_pos[OVL_6LAYER_NUM];
+ u8 ovl_layer_used[OVL_6LAYER_NUM];
+} dss_ovl_t;
+
+typedef struct dss_ovl_alpha {
+ u32 src_amode;
+ u32 src_gmode;
+ u32 alpha_offsrc;
+ u32 src_lmode;
+ u32 src_pmode;
+
+ u32 alpha_smode;
+
+ u32 dst_amode;
+ u32 dst_gmode;
+ u32 alpha_offdst;
+ u32 dst_pmode;
+
+ u32 fix_mode;
+} dss_ovl_alpha_t;
+
+/*******************************************************************************
+ ** DBUF
+ */
+#define DBUF_FRM_SIZE (0x0000)
+#define DBUF_FRM_HSIZE (0x0004)
+#define DBUF_SRAM_VALID_NUM (0x0008)
+#define DBUF_WBE_EN (0x000C)
+#define DBUF_THD_FILL_LEV0 (0x0010)
+#define DBUF_DFS_FILL_LEV1 (0x0014)
+#define DBUF_THD_RQOS (0x0018)
+#define DBUF_THD_WQOS (0x001C)
+#define DBUF_THD_CG (0x0020)
+#define DBUF_THD_OTHER (0x0024)
+#define DBUF_FILL_LEV0_CNT (0x0028)
+#define DBUF_FILL_LEV1_CNT (0x002C)
+#define DBUF_FILL_LEV2_CNT (0x0030)
+#define DBUF_FILL_LEV3_CNT (0x0034)
+#define DBUF_FILL_LEV4_CNT (0x0038)
+#define DBUF_ONLINE_FILL_LEVEL (0x003C)
+#define DBUF_WB_FILL_LEVEL (0x0040)
+#define DBUF_DFS_STATUS (0x0044)
+#define DBUF_THD_FLUX_REQ_BEF (0x0048)
+#define DBUF_DFS_LP_CTRL (0x004C)
+#define DBUF_RD_SHADOW_SEL (0x0050)
+#define DBUF_MEM_CTRL (0x0054)
+#define DBUF_PM_CTRL (0x0058)
+#define DBUF_CLK_SEL (0x005C)
+#define DBUF_CLK_EN (0x0060)
+#define DBUF_THD_FLUX_REQ_AFT (0x0064)
+#define DBUF_THD_DFS_OK (0x0068)
+#define DBUF_FLUX_REQ_CTRL (0x006C)
+#define DBUF_REG_DEFAULT (0x00A4)
+
+/*******************************************************************************
+ ** DPP
+ */
+#define DPP_RD_SHADOW_SEL (0x000)
+#define DPP_DEFAULT (0x004)
+#define DPP_ID (0x008)
+#define DPP_IMG_SIZE_BEF_SR (0x00C)
+#define DPP_IMG_SIZE_AFT_SR (0x010)
+#define DPP_SBL (0x014)
+#define DPP_SBL_MEM_CTRL (0x018)
+#define DPP_ARSR1P_MEM_CTRL (0x01C)
+#define DPP_CLK_SEL (0x020)
+#define DPP_CLK_EN (0x024)
+#define DPP_DBG1_CNT (0x028)
+#define DPP_DBG2_CNT (0x02C)
+#define DPP_DBG1 (0x030)
+#define DPP_DBG2 (0x034)
+#define DPP_DBG3 (0x038)
+#define DPP_DBG4 (0x03C)
+#define DPP_INTS (0x040)
+#define DPP_INT_MSK (0x044)
+#define DPP_ARSR1P (0x048)
+#define DPP_DBG_CNT DPP_DBG1_CNT
+
+#define DPP_CLRBAR_CTRL (0x100)
+#define DPP_CLRBAR_1ST_CLR (0x104)
+#define DPP_CLRBAR_2ND_CLR (0x108)
+#define DPP_CLRBAR_3RD_CLR (0x10C)
+
+#define DPP_CLIP_TOP (0x180)
+#define DPP_CLIP_BOTTOM (0x184)
+#define DPP_CLIP_LEFT (0x188)
+#define DPP_CLIP_RIGHT (0x18C)
+#define DPP_CLIP_EN (0x190)
+#define DPP_CLIP_DBG (0x194)
+
+#define DITHER_PARA (0x000)
+#define DITHER_CTL (0x004)
+#define DITHER_MATRIX_PART1 (0x008)
+#define DITHER_MATRIX_PART0 (0x00C)
+#define DITHER_ERRDIFF_WEIGHT (0x010)
+#define DITHER_FRC_01_PART1 (0x014)
+#define DITHER_FRC_01_PART0 (0x018)
+#define DITHER_FRC_10_PART1 (0x01C)
+#define DITHER_FRC_10_PART0 (0x020)
+#define DITHER_FRC_11_PART1 (0x024)
+#define DITHER_FRC_11_PART0 (0x028)
+#define DITHER_MEM_CTRL (0x02C)
+#define DITHER_DBG0 (0x030)
+#define DITHER_DBG1 (0x034)
+#define DITHER_DBG2 (0x038)
+
+#define CSC10B_IDC0 (0x000)
+#define CSC10B_IDC1 (0x004)
+#define CSC10B_IDC2 (0x008)
+#define CSC10B_ODC0 (0x00C)
+#define CSC10B_ODC1 (0x010)
+#define CSC10B_ODC2 (0x014)
+#define CSC10B_P00 (0x018)
+#define CSC10B_P01 (0x01C)
+#define CSC10B_P02 (0x020)
+#define CSC10B_P10 (0x024)
+#define CSC10B_P11 (0x028)
+#define CSC10B_P12 (0x02C)
+#define CSC10B_P20 (0x030)
+#define CSC10B_P21 (0x034)
+#define CSC10B_P22 (0x038)
+#define CSC10B_MODULE_EN (0x03C)
+#define CSC10B_MPREC (0x040)
+
+#define GAMA_EN (0x000)
+#define GAMA_MEM_CTRL (0x004)
+
+#define ACM_EN (0x000)
+#define ACM_SATA_OFFSET (0x004)
+#define ACM_HUESEL (0x008)
+#define ACM_CSC_IDC0 (0x00C)
+#define ACM_CSC_IDC1 (0x010)
+#define ACM_CSC_IDC2 (0x014)
+#define ACM_CSC_P00 (0x018)
+#define ACM_CSC_P01 (0x01C)
+#define ACM_CSC_P02 (0x020)
+#define ACM_CSC_P10 (0x024)
+#define ACM_CSC_P11 (0x028)
+#define ACM_CSC_P12 (0x02C)
+#define ACM_CSC_P20 (0x030)
+#define ACM_CSC_P21 (0x034)
+#define ACM_CSC_P22 (0x038)
+#define ACM_CSC_MRREC (0x03C)
+#define ACM_R0_H (0x040)
+#define ACM_R1_H (0x044)
+#define ACM_R2_H (0x048)
+#define ACM_R3_H (0x04C)
+#define ACM_R4_H (0x050)
+#define ACM_R5_H (0x054)
+#define ACM_R6_H (0x058)
+#define ACM_LUT_DIS0 (0x05C)
+#define ACM_LUT_DIS1 (0x060)
+#define ACM_LUT_DIS2 (0x064)
+#define ACM_LUT_DIS3 (0x068)
+#define ACM_LUT_DIS4 (0x06C)
+#define ACM_LUT_DIS5 (0x070)
+#define ACM_LUT_DIS6 (0x074)
+#define ACM_LUT_DIS7 (0x078)
+#define ACM_LUT_PARAM0 (0x07C)
+#define ACM_LUT_PARAM1 (0x080)
+#define ACM_LUT_PARAM2 (0x084)
+#define ACM_LUT_PARAM3 (0x088)
+#define ACM_LUT_PARAM4 (0x08C)
+#define ACM_LUT_PARAM5 (0x090)
+#define ACM_LUT_PARAM6 (0x094)
+#define ACM_LUT_PARAM7 (0x098)
+#define ACM_LUT_SEL (0x09C)
+#define ACM_MEM_CTRL (0x0A0)
+#define ACM_DEBUG_TOP (0x0A4)
+#define ACM_DEBUG_CFG (0x0A8)
+#define ACM_DEBUG_W (0x0AC)
+
+#define ACE_EN (0x000)
+#define ACE_SKIN_CFG (0x004)
+#define ACE_LUT_SEL (0x008)
+#define ACE_HIST_IND (0x00C)
+#define ACE_ACTIVE (0x010)
+#define ACE_DBG (0x014)
+#define ACE_MEM_CTRL (0x018)
+#define ACE_IN_SEL (0x01C)
+#define ACE_R2Y (0x020)
+#define ACE_G2Y (0x024)
+#define ACE_B2Y (0x028)
+#define ACE_Y_OFFSET (0x02C)
+#define ACE_Y_CEN (0x030)
+#define ACE_U_CEN (0x034)
+#define ACE_V_CEN (0x038)
+#define ACE_Y_EXT (0x03C)
+#define ACE_U_EXT (0x040)
+#define ACE_V_EXT (0x044)
+#define ACE_Y_ATTENU (0x048)
+#define ACE_U_ATTENU (0x04C)
+#define ACE_V_ATTENU (0x050)
+#define ACE_ROTA (0x054)
+#define ACE_ROTB (0x058)
+#define ACE_Y_CORE (0x05C)
+#define ACE_U_CORE (0x060)
+#define ACE_V_CORE (0x064)
+
+#define LCP_XCC_COEF_00 (0x000)
+#define LCP_XCC_COEF_01 (0x004)
+#define LCP_XCC_COEF_02 (0x008)
+#define LCP_XCC_COEF_03 (0x00C)
+#define LCP_XCC_COEF_10 (0x010)
+#define LCP_XCC_COEF_11 (0x014)
+#define LCP_XCC_COEF_12 (0x018)
+#define LCP_XCC_COEF_13 (0x01C)
+#define LCP_XCC_COEF_20 (0x020)
+#define LCP_XCC_COEF_21 (0x024)
+#define LCP_XCC_COEF_22 (0x028)
+#define LCP_XCC_COEF_23 (0x02C)
+#define LCP_GMP_BYPASS_EN (0x030)
+#define LCP_XCC_BYPASS_EN (0x034)
+#define LCP_DEGAMA_EN (0x038)
+#define LCP_DEGAMA_MEM_CTRL (0x03C)
+#define LCP_GMP_MEM_CTRL (0x040)
+
+typedef struct dss_arsr1p {
+ u32 ihleft;
+ u32 ihright;
+ u32 ihleft1;
+ u32 ihright1;
+ u32 ivtop;
+ u32 ivbottom;
+ u32 uv_offset;
+ u32 ihinc;
+ u32 ivinc;
+ u32 mode;
+ u32 format;
+
+ u32 skin_thres_y;
+ u32 skin_thres_u;
+ u32 skin_thres_v;
+ u32 skin_expected;
+ u32 skin_cfg;
+ u32 shoot_cfg1;
+ u32 shoot_cfg2;
+ u32 sharp_cfg1;
+ u32 sharp_cfg2;
+ u32 sharp_cfg3;
+ u32 sharp_cfg4;
+ u32 sharp_cfg5;
+ u32 sharp_cfg6;
+ u32 sharp_cfg7;
+ u32 sharp_cfg8;
+ u32 sharp_cfg9;
+ u32 sharp_cfg10;
+ u32 sharp_cfg11;
+ u32 diff_ctrl;
+ u32 lsc_cfg1;
+ u32 lsc_cfg2;
+ u32 lsc_cfg3;
+ u32 force_clk_on_cfg;
+
+ u32 dpp_img_hrz_bef_sr;
+ u32 dpp_img_vrt_bef_sr;
+ u32 dpp_img_hrz_aft_sr;
+ u32 dpp_img_vrt_aft_sr;
+} dss_arsr1p_t;
+
+#define ARSR1P_INC_FACTOR (65536)
+
+#define ARSR1P_IHLEFT (0x000)
+#define ARSR1P_IHRIGHT (0x004)
+#define ARSR1P_IHLEFT1 (0x008)
+#define ARSR1P_IHRIGHT1 (0x00C)
+#define ARSR1P_IVTOP (0x010)
+#define ARSR1P_IVBOTTOM (0x014)
+#define ARSR1P_UV_OFFSET (0x018)
+#define ARSR1P_IHINC (0x01C)
+#define ARSR1P_IVINC (0x020)
+#define ARSR1P_MODE (0x024)
+#define ARSR1P_FORMAT (0x028)
+#define ARSR1P_SKIN_THRES_Y (0x02C)
+#define ARSR1P_SKIN_THRES_U (0x030)
+#define ARSR1P_SKIN_THRES_V (0x034)
+#define ARSR1P_SKIN_EXPECTED (0x038)
+#define ARSR1P_SKIN_CFG (0x03C)
+#define ARSR1P_SHOOT_CFG1 (0x040)
+#define ARSR1P_SHOOT_CFG2 (0x044)
+#define ARSR1P_SHARP_CFG1 (0x048)
+#define ARSR1P_SHARP_CFG2 (0x04C)
+#define ARSR1P_SHARP_CFG3 (0x050)
+#define ARSR1P_SHARP_CFG4 (0x054)
+#define ARSR1P_SHARP_CFG5 (0x058)
+#define ARSR1P_SHARP_CFG6 (0x05C)
+#define ARSR1P_SHARP_CFG7 (0x060)
+#define ARSR1P_SHARP_CFG8 (0x064)
+#define ARSR1P_SHARP_CFG9 (0x068)
+#define ARSR1P_SHARP_CFG10 (0x06C)
+#define ARSR1P_SHARP_CFG11 (0x070)
+#define ARSR1P_DIFF_CTRL (0x074)
+#define ARSR1P_LSC_CFG1 (0x078)
+#define ARSR1P_LSC_CFG2 (0x07C)
+#define ARSR1P_LSC_CFG3 (0x080)
+#define ARSR1P_FORCE_CLK_ON_CFG (0x084)
+
+/*******************************************************************************
+ ** BIT EXT
+ */
+#define BIT_EXT0_CTL (0x000)
+
+#define U_GAMA_R_COEF (0x000)
+#define U_GAMA_G_COEF (0x400)
+#define U_GAMA_B_COEF (0x800)
+#define U_GAMA_R_LAST_COEF (0x200)
+#define U_GAMA_G_LAST_COEF (0x600)
+#define U_GAMA_B_LAST_COEF (0xA00)
+
+#define ACM_U_H_COEF (0x000)
+#define ACM_U_SATA_COEF (0x200)
+#define ACM_U_SATR0_COEF (0x300)
+#define ACM_U_SATR1_COEF (0x340)
+#define ACM_U_SATR2_COEF (0x380)
+#define ACM_U_SATR3_COEF (0x3C0)
+#define ACM_U_SATR4_COEF (0x400)
+#define ACM_U_SATR5_COEF (0x440)
+#define ACM_U_SATR6_COEF (0x480)
+#define ACM_U_SATR7_COEF (0x4C0)
+
+#define LCP_U_GMP_COEF (0x0000)
+#define LCP_U_DEGAMA_R_COEF (0x5000)
+#define LCP_U_DEGAMA_G_COEF (0x5400)
+#define LCP_U_DEGAMA_B_COEF (0x5800)
+#define LCP_U_DEGAMA_R_LAST_COEF (0x5200)
+#define LCP_U_DEGAMA_G_LAST_COEF (0x5600)
+#define LCP_U_DEGAMA_B_LAST_COEF (0x5A00)
+
+#define ACE_HIST0 (0x000)
+#define ACE_HIST1 (0x400)
+#define ACE_LUT0 (0x800)
+#define ACE_LUT1 (0xA00)
+
+#define ARSR1P_LSC_GAIN (0x084)
+#define ARSR1P_COEFF_H_Y0 (0x0F0)
+#define ARSR1P_COEFF_H_Y1 (0x114)
+#define ARSR1P_COEFF_V_Y0 (0x138)
+#define ARSR1P_COEFF_V_Y1 (0x15C)
+#define ARSR1P_COEFF_H_UV0 (0x180)
+#define ARSR1P_COEFF_H_UV1 (0x1A4)
+#define ARSR1P_COEFF_V_UV0 (0x1C8)
+#define ARSR1P_COEFF_V_UV1 (0x1EC)
+
+#define HIACE_INT_STAT (0x0000)
+#define HIACE_INT_UNMASK (0x0004)
+#define HIACE_BYPASS_ACE (0x0008)
+#define HIACE_BYPASS_ACE_STAT (0x000c)
+#define HIACE_UPDATE_LOCAL (0x0010)
+#define HIACE_LOCAL_VALID (0x0014)
+#define HIACE_GAMMA_AB_SHADOW (0x0018)
+#define HIACE_GAMMA_AB_WORK (0x001c)
+#define HIACE_GLOBAL_HIST_AB_SHADOW (0x0020)
+#define HIACE_GLOBAL_HIST_AB_WORK (0x0024)
+#define HIACE_IMAGE_INFO (0x0030)
+#define HIACE_HALF_BLOCK_H_W (0x0034)
+#define HIACE_XYWEIGHT (0x0038)
+#define HIACE_LHIST_SFT (0x003c)
+#define HIACE_HUE (0x0050)
+#define HIACE_SATURATION (0x0054)
+#define HIACE_VALUE (0x0058)
+#define HIACE_SKIN_GAIN (0x005c)
+#define HIACE_UP_LOW_TH (0x0060)
+#define HIACE_UP_CNT (0x0070)
+#define HIACE_LOW_CNT (0x0074)
+#define HIACE_GLOBAL_HIST_LUT_ADDR (0x0080)
+#define HIACE_LHIST_EN (0x0100)
+#define HIACE_LOCAL_HIST_VxHy_2z_2z1 (0x0104)
+#define HIACE_GAMMA_EN (0x0108)
+#define HIACE_GAMMA_VxHy_3z2_3z1_3z_W (0x010c)
+#define HIACE_GAMMA_EN_HV_R (0x0110)
+#define HIACE_GAMMA_VxHy_3z2_3z1_3z_R (0x0114)
+#define HIACE_INIT_GAMMA (0x0120)
+#define HIACE_MANUAL_RELOAD (0x0124)
+#define HIACE_RAMCLK_FUNC (0x0128)
+#define HIACE_CLK_GATE (0x012c)
+#define HIACE_GAMMA_RAM_A_CFG_MEM_CTRL (0x0130)
+#define HIACE_GAMMA_RAM_B_CFG_MEM_CTRL (0x0134)
+#define HIACE_LHIST_RAM_CFG_MEM_CTRL (0x0138)
+#define HIACE_GAMMA_RAM_A_CFG_PM_CTRL (0x0140)
+#define HIACE_GAMMA_RAM_B_CFG_PM_CTRL (0x0144)
+#define HIACE_LHIST_RAM_CFG_PM_CTRL (0x0148)
+
+/*******************************************************************************
+ ** IFBC
+ */
+#define IFBC_SIZE (0x0000)
+#define IFBC_CTRL (0x0004)
+#define IFBC_HIMAX_CTRL0 (0x0008)
+#define IFBC_HIMAX_CTRL1 (0x000C)
+#define IFBC_HIMAX_CTRL2 (0x0010)
+#define IFBC_HIMAX_CTRL3 (0x0014)
+#define IFBC_EN (0x0018)
+#define IFBC_MEM_CTRL (0x001C)
+#define IFBC_INSERT (0x0020)
+#define IFBC_HIMAX_TEST_MODE (0x0024)
+#define IFBC_CORE_GT (0x0028)
+#define IFBC_PM_CTRL (0x002C)
+#define IFBC_RD_SHADOW (0x0030)
+#define IFBC_ORISE_CTL (0x0034)
+#define IFBC_ORSISE_DEBUG0 (0x0038)
+#define IFBC_ORSISE_DEBUG1 (0x003C)
+#define IFBC_RSP_COMP_TEST (0x0040)
+#define IFBC_CLK_SEL (0x044)
+#define IFBC_CLK_EN (0x048)
+#define IFBC_PAD (0x004C)
+#define IFBC_REG_DEFAULT (0x0050)
+
+/*******************************************************************************
+ ** DSC
+ */
+#define DSC_VERSION (0x0000)
+#define DSC_PPS_IDENTIFIER (0x0004)
+#define DSC_EN (0x0008)
+#define DSC_CTRL (0x000C)
+#define DSC_PIC_SIZE (0x0010)
+#define DSC_SLICE_SIZE (0x0014)
+#define DSC_CHUNK_SIZE (0x0018)
+#define DSC_INITIAL_DELAY (0x001C)
+#define DSC_RC_PARAM0 (0x0020)
+#define DSC_RC_PARAM1 (0x0024)
+#define DSC_RC_PARAM2 (0x0028)
+#define DSC_RC_PARAM3 (0x002C)
+#define DSC_FLATNESS_QP_TH (0x0030)
+#define DSC_RC_PARAM4 (0x0034)
+#define DSC_RC_PARAM5 (0x0038)
+#define DSC_RC_BUF_THRESH0 (0x003C)
+#define DSC_RC_BUF_THRESH1 (0x0040)
+#define DSC_RC_BUF_THRESH2 (0x0044)
+#define DSC_RC_BUF_THRESH3 (0x0048)
+#define DSC_RC_RANGE_PARAM0 (0x004C)
+#define DSC_RC_RANGE_PARAM1 (0x0050)
+#define DSC_RC_RANGE_PARAM2 (0x0054)
+#define DSC_RC_RANGE_PARAM3 (0x0058)
+#define DSC_RC_RANGE_PARAM4 (0x005C)
+#define DSC_RC_RANGE_PARAM5 (0x0060)
+#define DSC_RC_RANGE_PARAM6 (0x0064)
+#define DSC_RC_RANGE_PARAM7 (0x0068)
+#define DSC_ADJUSTMENT_BITS (0x006C)
+#define DSC_BITS_PER_GRP (0x0070)
+#define DSC_MULTI_SLICE_CTL (0x0074)
+#define DSC_OUT_CTRL (0x0078)
+#define DSC_CLK_SEL (0x007C)
+#define DSC_CLK_EN (0x0080)
+#define DSC_MEM_CTRL (0x0084)
+#define DSC_ST_DATAIN (0x0088)
+#define DSC_ST_DATAOUT (0x008C)
+#define DSC0_ST_SLC_POS (0x0090)
+#define DSC1_ST_SLC_POS (0x0094)
+#define DSC0_ST_PIC_POS (0x0098)
+#define DSC1_ST_PIC_POS (0x009C)
+#define DSC0_ST_FIFO (0x00A0)
+#define DSC1_ST_FIFO (0x00A4)
+#define DSC0_ST_LINEBUF (0x00A8)
+#define DSC1_ST_LINEBUF (0x00AC)
+#define DSC_ST_ITFC (0x00B0)
+#define DSC_RD_SHADOW_SEL (0x00B4)
+#define DSC_REG_DEFAULT (0x00B8)
+
+/*******************************************************************************
+ ** LDI
+ */
+#define LDI_DPI0_HRZ_CTRL0 (0x0000)
+#define LDI_DPI0_HRZ_CTRL1 (0x0004)
+#define LDI_DPI0_HRZ_CTRL2 (0x0008)
+#define LDI_VRT_CTRL0 (0x000C)
+#define LDI_VRT_CTRL1 (0x0010)
+#define LDI_VRT_CTRL2 (0x0014)
+#define LDI_PLR_CTRL (0x0018)
+#define LDI_SH_MASK_INT (0x001C)
+#define LDI_3D_CTRL (0x0020)
+#define LDI_CTRL (0x0024)
+#define LDI_WORK_MODE (0x0028)
+#define LDI_DE_SPACE_LOW (0x002C)
+#define LDI_DSI_CMD_MOD_CTRL (0x0030)
+#define LDI_DSI_TE_CTRL (0x0034)
+#define LDI_DSI_TE_HS_NUM (0x0038)
+#define LDI_DSI_TE_HS_WD (0x003C)
+#define LDI_DSI_TE_VS_WD (0x0040)
+#define LDI_FRM_MSK (0x0044)
+#define LDI_FRM_MSK_UP (0x0048)
+#define LDI_VINACT_MSK_LEN (0x0050)
+#define LDI_VSTATE (0x0054)
+#define LDI_DPI0_HSTATE (0x0058)
+#define LDI_DPI1_HSTATE (0x005C)
+#define LDI_CMD_EVENT_SEL (0x0060)
+#define LDI_SRAM_LP_CTRL (0x0064)
+#define LDI_ITF_RD_SHADOW (0x006C)
+#define LDI_DPI1_HRZ_CTRL0 (0x00F0)
+#define LDI_DPI1_HRZ_CTRL1 (0x00F4)
+#define LDI_DPI1_HRZ_CTRL2 (0x00F8)
+#define LDI_OVERLAP_SIZE (0x00FC)
+#define LDI_MEM_CTRL (0x0100)
+#define LDI_PM_CTRL (0x0104)
+#define LDI_CLK_SEL (0x0108)
+#define LDI_CLK_EN (0x010C)
+#define LDI_IF_BYPASS (0x0110)
+#define LDI_FRM_VALID_DBG (0x0118)
+/* LDI GLB*/
+#define LDI_PXL0_DIV2_GT_EN (0x0210)
+#define LDI_PXL0_DIV4_GT_EN (0x0214)
+#define LDI_PXL0_GT_EN (0x0218)
+#define LDI_PXL0_DSI_GT_EN (0x021C)
+#define LDI_PXL0_DIVXCFG (0x0220)
+#define LDI_DSI1_CLK_SEL (0x0224)
+#define LDI_VESA_CLK_SEL (0x0228)
+/* DSI1 RST*/
+#define LDI_DSI1_RST_SEL (0x0238)
+/* LDI INTERRUPT*/
+#define LDI_MCU_ITF_INTS (0x0240)
+#define LDI_MCU_ITF_INT_MSK (0x0244)
+#define LDI_CPU_ITF_INTS (0x0248)
+#define LDI_CPU_ITF_INT_MSK (0x024C)
+/* LDI MODULE CLOCK GATING*/
+#define LDI_MODULE_CLK_SEL (0x0258)
+#define LDI_MODULE_CLK_EN (0x025C)
+
+/*******************************************************************************
+ ** MIPI DSI
+ */
+#define MIPIDSI_VERSION_OFFSET (0x0000)
+#define MIPIDSI_PWR_UP_OFFSET (0x0004)
+#define MIPIDSI_CLKMGR_CFG_OFFSET (0x0008)
+#define MIPIDSI_DPI_VCID_OFFSET (0x000c)
+#define MIPIDSI_DPI_COLOR_CODING_OFFSET (0x0010)
+#define MIPIDSI_DPI_CFG_POL_OFFSET (0x0014)
+#define MIPIDSI_DPI_LP_CMD_TIM_OFFSET (0x0018)
+#define MIPIDSI_PCKHDL_CFG_OFFSET (0x002c)
+#define MIPIDSI_GEN_VCID_OFFSET (0x0030)
+#define MIPIDSI_MODE_CFG_OFFSET (0x0034)
+#define MIPIDSI_VID_MODE_CFG_OFFSET (0x0038)
+#define MIPIDSI_VID_PKT_SIZE_OFFSET (0x003c)
+#define MIPIDSI_VID_NUM_CHUNKS_OFFSET (0x0040)
+#define MIPIDSI_VID_NULL_SIZE_OFFSET (0x0044)
+#define MIPIDSI_VID_HSA_TIME_OFFSET (0x0048)
+#define MIPIDSI_VID_HBP_TIME_OFFSET (0x004c)
+#define MIPIDSI_VID_HLINE_TIME_OFFSET (0x0050)
+#define MIPIDSI_VID_VSA_LINES_OFFSET (0x0054)
+#define MIPIDSI_VID_VBP_LINES_OFFSET (0x0058)
+#define MIPIDSI_VID_VFP_LINES_OFFSET (0x005c)
+#define MIPIDSI_VID_VACTIVE_LINES_OFFSET (0x0060)
+#define MIPIDSI_EDPI_CMD_SIZE_OFFSET (0x0064)
+#define MIPIDSI_CMD_MODE_CFG_OFFSET (0x0068)
+#define MIPIDSI_GEN_HDR_OFFSET (0x006c)
+#define MIPIDSI_GEN_PLD_DATA_OFFSET (0x0070)
+#define MIPIDSI_CMD_PKT_STATUS_OFFSET (0x0074)
+#define MIPIDSI_TO_CNT_CFG_OFFSET (0x0078)
+#define MIPIDSI_HS_RD_TO_CNT_OFFSET (0x007C)
+#define MIPIDSI_LP_RD_TO_CNT_OFFSET (0x0080)
+#define MIPIDSI_HS_WR_TO_CNT_OFFSET (0x0084)
+#define MIPIDSI_LP_WR_TO_CNT_OFFSET (0x0088)
+#define MIPIDSI_BTA_TO_CNT_OFFSET (0x008C)
+#define MIPIDSI_SDF_3D_OFFSET (0x0090)
+#define MIPIDSI_LPCLK_CTRL_OFFSET (0x0094)
+#define MIPIDSI_PHY_TMR_LPCLK_CFG_OFFSET (0x0098)
+#define MIPIDSI_PHY_TMR_CFG_OFFSET (0x009c)
+#define MIPIDSI_PHY_RSTZ_OFFSET (0x00a0)
+#define MIPIDSI_PHY_IF_CFG_OFFSET (0x00a4)
+#define MIPIDSI_PHY_ULPS_CTRL_OFFSET (0x00a8)
+#define MIPIDSI_PHY_TX_TRIGGERS_OFFSET (0x00ac)
+#define MIPIDSI_PHY_STATUS_OFFSET (0x00b0)
+#define MIPIDSI_PHY_TST_CTRL0_OFFSET (0x00b4)
+#define MIPIDSI_PHY_TST_CTRL1_OFFSET (0x00b8)
+#define MIPIDSI_INT_ST0_OFFSET (0x00bc)
+#define MIPIDSI_INT_ST1_OFFSET (0x00c0)
+#define MIPIDSI_INT_MSK0_OFFSET (0x00c4)
+#define MIPIDSI_INT_MSK1_OFFSET (0x00c8)
+#define INT_FORCE0 (0x00D8)
+#define INT_FORCE1 (0x00DC)
+#define MIPIDSI_DSC_PARAMETER_OFFSET (0x00f0)
+#define MIPIDSI_PHY_TMR_RD_CFG_OFFSET (0x00f4)
+#define VID_SHADOW_CTRL (0x0100)
+#define DPI_VCID_ACT (0x010C)
+#define DPI_COLOR_CODING_ACT (0x0110)
+#define DPI_LP_CMD_TIM_ACT (0x0118)
+#define VID_MODE_CFG_ACT (0x0138)
+#define VID_PKT_SIZE_ACT (0x013C)
+#define VID_NUM_CHUNKS_ACT (0x0140)
+#define VID_NULL_SIZE_ACT (0x0144)
+#define VID_HSA_TIME_ACT (0x0148)
+#define VID_HBP_TIME_ACT (0x014C)
+#define VID_HLINE_TIME_ACT (0x0150)
+#define VID_VSA_LINES_ACT (0x0154)
+#define VID_VBP_LINES_ACT (0x0158)
+#define VID_VFP_LINES_ACT (0x015C)
+#define VID_VACTIVE_LINES_ACT (0x0160)
+#define SDF_3D_ACT (0x0190)
+
+/*******************************************************************************
+ ** MMBUF
+ */
+#define SMC_LOCK (0x0000)
+#define SMC_MEM_LP (0x0004)
+#define SMC_GCLK_CS (0x000C)
+#define SMC_QOS_BACKDOOR (0x0010)
+#define SMC_DFX_WCMD_CNT_1ST (0x0014)
+#define SMC_DFX_WCMD_CNT_2ND (0x0018)
+#define SMC_DFX_WCMD_CNT_3RD (0x001C)
+#define SMC_DFX_WCMD_CNT_4TH (0x0020)
+#define SMC_DFX_RCMD_CNT_1ST (0x0024)
+#define SMC_DFX_RCMD_CNT_2ND (0x0028)
+#define SMC_DFX_RCMD_CNT_3RD (0x002C)
+#define SMC_DFX_RCMD_CNT_4TH (0x0030)
+#define SMC_CS_IDLE (0x0034)
+#define SMC_DFX_BFIFO_CNT0 (0x0038)
+#define SMC_DFX_RDFIFO_CNT1 (0x003C)
+#define SMC_SP_SRAM_STATE0 (0x0040)
+#define SMC_SP_SRAM_STATE1 (0x0044)
+
+enum hisi_fb_pixel_format {
+ HISI_FB_PIXEL_FORMAT_RGB_565 = 0,
+ HISI_FB_PIXEL_FORMAT_RGBX_4444,
+ HISI_FB_PIXEL_FORMAT_RGBA_4444,
+ HISI_FB_PIXEL_FORMAT_RGBX_5551,
+ HISI_FB_PIXEL_FORMAT_RGBA_5551,
+ HISI_FB_PIXEL_FORMAT_RGBX_8888,
+ HISI_FB_PIXEL_FORMAT_RGBA_8888,
+
+ HISI_FB_PIXEL_FORMAT_BGR_565,
+ HISI_FB_PIXEL_FORMAT_BGRX_4444,
+ HISI_FB_PIXEL_FORMAT_BGRA_4444,
+ HISI_FB_PIXEL_FORMAT_BGRX_5551,
+ HISI_FB_PIXEL_FORMAT_BGRA_5551,
+ HISI_FB_PIXEL_FORMAT_BGRX_8888,
+ HISI_FB_PIXEL_FORMAT_BGRA_8888,
+
+ HISI_FB_PIXEL_FORMAT_YUV_422_I,
+
+ /* YUV Semi-planar */
+ HISI_FB_PIXEL_FORMAT_YCbCr_422_SP, /* NV16 */
+ HISI_FB_PIXEL_FORMAT_YCrCb_422_SP,
+ HISI_FB_PIXEL_FORMAT_YCbCr_420_SP,
+ HISI_FB_PIXEL_FORMAT_YCrCb_420_SP, /* NV21 */
+
+ /* YUV Planar */
+ HISI_FB_PIXEL_FORMAT_YCbCr_422_P,
+ HISI_FB_PIXEL_FORMAT_YCrCb_422_P,
+ HISI_FB_PIXEL_FORMAT_YCbCr_420_P,
+ HISI_FB_PIXEL_FORMAT_YCrCb_420_P, /* HISI_FB_PIXEL_FORMAT_YV12 */
+
+ /* YUV Package */
+ HISI_FB_PIXEL_FORMAT_YUYV_422_Pkg,
+ HISI_FB_PIXEL_FORMAT_UYVY_422_Pkg,
+ HISI_FB_PIXEL_FORMAT_YVYU_422_Pkg,
+ HISI_FB_PIXEL_FORMAT_VYUY_422_Pkg,
+ HISI_FB_PIXEL_FORMAT_MAX,
+
+ HISI_FB_PIXEL_FORMAT_UNSUPPORT = 800
+};
+
+struct dss_hw_ctx {
+ void __iomem *base;
+ struct regmap *noc_regmap;
+ struct reset_control *reset;
+
+ void __iomem *noc_dss_base;
+ void __iomem *peri_crg_base;
+ void __iomem *pmc_base;
+ void __iomem *sctrl_base;
+
+ struct clk *dss_axi_clk;
+ struct clk *dss_pclk_dss_clk;
+ struct clk *dss_pri_clk;
+ struct clk *dss_pxl0_clk;
+ struct clk *dss_pxl1_clk;
+ struct clk *dss_mmbuf_clk;
+ struct clk *dss_pclk_mmbuf_clk;
+
+ bool power_on;
+ int irq;
+
+ wait_queue_head_t vactive0_end_wq;
+ u32 vactive0_end_flag;
+ ktime_t vsync_timestamp;
+ ktime_t vsync_timestamp_prev;
+
+ struct iommu_domain *mmu_domain;
+ struct ion_client *ion_client;
+ struct ion_handle *ion_handle;
+ struct iommu_map_format iommu_format;
+ char __iomem *screen_base;
+ unsigned long smem_start;
+ unsigned long screen_size;
+};
+
+struct dss_crtc {
+ struct drm_crtc base;
+ struct dss_hw_ctx *ctx;
+ bool enable;
+ u32 out_format;
+ u32 bgr_fmt;
+};
+
+struct dss_plane {
+ struct drm_plane base;
+ /*void *ctx;*/
+ void *acrtc;
+ u8 ch; /* channel */
+};
+
+struct dss_data {
+ struct dss_crtc acrtc;
+ struct dss_plane aplane[DSS_CH_NUM];
+ struct dss_hw_ctx ctx;
+};
+
+/* ade-format info: */
+struct dss_format {
+ u32 pixel_format;
+ enum hisi_fb_pixel_format dss_format;
+};
+
+#define MIPI_DPHY_NUM (2)
+
+/* IFBC compress mode */
+enum IFBC_TYPE {
+ IFBC_TYPE_NONE = 0,
+ IFBC_TYPE_ORISE2X,
+ IFBC_TYPE_ORISE3X,
+ IFBC_TYPE_HIMAX2X,
+ IFBC_TYPE_RSP2X,
+ IFBC_TYPE_RSP3X,
+ IFBC_TYPE_VESA2X_SINGLE,
+ IFBC_TYPE_VESA3X_SINGLE,
+ IFBC_TYPE_VESA2X_DUAL,
+ IFBC_TYPE_VESA3X_DUAL,
+ IFBC_TYPE_VESA3_75X_DUAL,
+
+ IFBC_TYPE_MAX
+};
+
+/* IFBC compress mode */
+enum IFBC_COMP_MODE {
+ IFBC_COMP_MODE_0 = 0,
+ IFBC_COMP_MODE_1,
+ IFBC_COMP_MODE_2,
+ IFBC_COMP_MODE_3,
+ IFBC_COMP_MODE_4,
+ IFBC_COMP_MODE_5,
+ IFBC_COMP_MODE_6,
+};
+
+/* xres_div */
+enum XRES_DIV {
+ XRES_DIV_1 = 1,
+ XRES_DIV_2,
+ XRES_DIV_3,
+ XRES_DIV_4,
+ XRES_DIV_5,
+ XRES_DIV_6,
+};
+
+/* yres_div */
+enum YRES_DIV {
+ YRES_DIV_1 = 1,
+ YRES_DIV_2,
+ YRES_DIV_3,
+ YRES_DIV_4,
+ YRES_DIV_5,
+ YRES_DIV_6,
+};
+
+/* pxl0_divxcfg */
+enum PXL0_DIVCFG {
+ PXL0_DIVCFG_0 = 0,
+ PXL0_DIVCFG_1,
+ PXL0_DIVCFG_2,
+ PXL0_DIVCFG_3,
+ PXL0_DIVCFG_4,
+ PXL0_DIVCFG_5,
+ PXL0_DIVCFG_6,
+ PXL0_DIVCFG_7,
+};
+
+/* pxl0_div2_gt_en */
+enum PXL0_DIV2_GT_EN {
+ PXL0_DIV2_GT_EN_CLOSE = 0,
+ PXL0_DIV2_GT_EN_OPEN,
+};
+
+/* pxl0_div4_gt_en */
+enum PXL0_DIV4_GT_EN {
+ PXL0_DIV4_GT_EN_CLOSE = 0,
+ PXL0_DIV4_GT_EN_OPEN,
+};
+
+/* pxl0_dsi_gt_en */
+enum PXL0_DSI_GT_EN {
+ PXL0_DSI_GT_EN_0 = 0,
+ PXL0_DSI_GT_EN_1,
+ PXL0_DSI_GT_EN_2,
+ PXL0_DSI_GT_EN_3,
+};
+
+typedef struct mipi_ifbc_division {
+ u32 xres_div;
+ u32 yres_div;
+ u32 comp_mode;
+ u32 pxl0_div2_gt_en;
+ u32 pxl0_div4_gt_en;
+ u32 pxl0_divxcfg;
+ u32 pxl0_dsi_gt_en;
+} mipi_ifbc_division_t;
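+/*
+ * Illustrative note (an assumption about intended usage): a table of these
+ * entries, indexed by single/dual MIPI DPHY and IFBC_TYPE, supplies the
+ * divisors applied to the panel x/y resolution and the pxl0 clock-gating /
+ * divider settings before the LDI and DSI timings are programmed.
+ */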
+
+/*******************************************************************************
+ ** helper macros
+ */
+#define outp32(addr, val) writel(val, addr)
+#define outp16(addr, val) writew(val, addr)
+#define outp8(addr, val) writeb(val, addr)
+#define outp(addr, val) outp32(addr, val)
+
+#define inp32(addr) readl(addr)
+#define inp16(addr) readw(addr)
+#define inp8(addr) readb(addr)
+#define inp(addr) inp32(addr)
+
+#define inpw(port) readw(port)
+#define outpw(port, val) writew(val, port)
+#define inpdw(port) readl(port)
+#define outpdw(port, val) writel(val, port)
+
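+/*
+ * Power-of-two alignment helpers, e.g. ALIGN_UP(1080, 16) == 1088 and
+ * ALIGN_DOWN(1080, 16) == 1072.  Both assume "al" is a power of two.
+ */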
+#ifndef ALIGN_DOWN
+#define ALIGN_DOWN(val, al) ((val) & ~((al) - 1))
+#endif
+#ifndef ALIGN_UP
+#define ALIGN_UP(val, al) (((val) + ((al) - 1)) & ~((al) - 1))
+#endif
+
+#define to_dss_crtc(crtc) \
+ container_of(crtc, struct dss_crtc, base)
+
+#define to_dss_plane(plane) \
+ container_of(plane, struct dss_plane, base)
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.c
new file mode 100644
index 000000000000..2a13bbd772b7
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.c
@@ -0,0 +1,730 @@
+/* Copyright (c) 2013-2014, Hisilicon Tech. Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <drm/drmP.h>
+
+#include "drm_mipi_dsi.h"
+#include "kirin_drm_dpe_utils.h"
+
+int g_debug_set_reg_val = 0;
+
+extern u32 g_dss_module_ovl_base[DSS_MCTL_IDX_MAX][MODULE_OVL_MAX];
+
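+/*
+ * Divider settings indexed as g_mipi_ifbc_division[mipi_idx][ifbc_type],
+ * where mipi_idx 0 selects the single-MIPI table and 1 the dual-MIPI table.
+ */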
+mipi_ifbc_division_t g_mipi_ifbc_division[MIPI_DPHY_NUM][IFBC_TYPE_MAX] = {
+ /*single mipi*/
+ {
+ /*none*/
+ {XRES_DIV_1, YRES_DIV_1, IFBC_COMP_MODE_0, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_0, PXL0_DSI_GT_EN_1},
+ /*orise2x*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_0, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /*orise3x*/
+ {XRES_DIV_3, YRES_DIV_1, IFBC_COMP_MODE_1, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_2, PXL0_DSI_GT_EN_3},
+ /*himax2x*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_2, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /*rsp2x*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_3, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_OPEN, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /* rsp3x [NOTE] in reality xres_div = 1.5, yres_div = 2; amended in mipi_ifbc_get_rect() */
+ {XRES_DIV_3, YRES_DIV_1, IFBC_COMP_MODE_4, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_OPEN, PXL0_DIVCFG_2, PXL0_DSI_GT_EN_3},
+ /*vesa2x_1pipe*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_5, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /*vesa3x_1pipe*/
+ {XRES_DIV_3, YRES_DIV_1, IFBC_COMP_MODE_5, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_2, PXL0_DSI_GT_EN_3},
+ /*vesa2x_2pipe*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_6, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /*vesa3x_2pipe*/
+ {XRES_DIV_3, YRES_DIV_1, IFBC_COMP_MODE_6, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_2, PXL0_DSI_GT_EN_3}
+ },
+
+ /*dual mipi*/
+ {
+ /*none*/
+ {XRES_DIV_2, YRES_DIV_1, IFBC_COMP_MODE_0, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_1, PXL0_DSI_GT_EN_3},
+ /*orise2x*/
+ {XRES_DIV_4, YRES_DIV_1, IFBC_COMP_MODE_0, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_3, PXL0_DSI_GT_EN_3},
+ /*orise3x*/
+ {XRES_DIV_6, YRES_DIV_1, IFBC_COMP_MODE_1, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_5, PXL0_DSI_GT_EN_3},
+ /*himax2x*/
+ {XRES_DIV_4, YRES_DIV_1, IFBC_COMP_MODE_2, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_3, PXL0_DSI_GT_EN_3},
+ /*rsp2x*/
+ {XRES_DIV_4, YRES_DIV_1, IFBC_COMP_MODE_3, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_OPEN, PXL0_DIVCFG_3, PXL0_DSI_GT_EN_3},
+ /*rsp3x*/
+ {XRES_DIV_3, YRES_DIV_2, IFBC_COMP_MODE_4, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_OPEN, PXL0_DIVCFG_5, PXL0_DSI_GT_EN_3},
+ /*vesa2x_1pipe*/
+ {XRES_DIV_4, YRES_DIV_1, IFBC_COMP_MODE_5, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_3, PXL0_DSI_GT_EN_3},
+ /*vesa3x_1pipe*/
+ {XRES_DIV_6, YRES_DIV_1, IFBC_COMP_MODE_5, PXL0_DIV2_GT_EN_CLOSE,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_5, PXL0_DSI_GT_EN_3},
+ /*vesa2x_2pipe*/
+ {XRES_DIV_4, YRES_DIV_1, IFBC_COMP_MODE_6, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_3, PXL0_DSI_GT_EN_3},
+ /*vesa3x_2pipe*/
+ {XRES_DIV_6, YRES_DIV_1, IFBC_COMP_MODE_6, PXL0_DIV2_GT_EN_OPEN,
+ PXL0_DIV4_GT_EN_CLOSE, PXL0_DIVCFG_5, 3} }
+};
+
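+/*
+ * set_reg() performs a read-modify-write of a "bw"-bit wide field starting at
+ * bit "bs" of the register at "addr".  For example,
+ * set_reg(ldi_base + LDI_CTRL, 0x1, 1, 0) sets only bit 0 of LDI_CTRL and
+ * leaves the remaining bits untouched.
+ */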
+void set_reg(char __iomem *addr, uint32_t val, uint8_t bw, uint8_t bs)
+{
+ u32 mask = (1UL << bw) - 1UL;
+ u32 tmp = 0;
+
+ tmp = inp32(addr);
+ tmp &= ~(mask << bs);
+
+ outp32(addr, tmp | ((val & mask) << bs));
+
+ if (g_debug_set_reg_val) {
+ printk(KERN_INFO "writel: [%p] = 0x%x\n", addr,
+ tmp | ((val & mask) << bs));
+ }
+}
+
+static int mipi_ifbc_get_rect(struct dss_rect *rect)
+{
+ u32 ifbc_type;
+ u32 mipi_idx;
+ u32 xres_div;
+ u32 yres_div;
+
+ ifbc_type = IFBC_TYPE_NONE;
+ mipi_idx = 0;
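+ /*
+ * With IFBC disabled on a single MIPI interface both divisors are 1,
+ * so the rectangle is left effectively unchanged.
+ */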
+
+ xres_div = g_mipi_ifbc_division[mipi_idx][ifbc_type].xres_div;
+ yres_div = g_mipi_ifbc_division[mipi_idx][ifbc_type].yres_div;
+
+ if ((rect->w % xres_div) > 0)
+ DRM_ERROR("xres(%d) is not division_h(%d) pixel aligned!\n", rect->w, xres_div);
+
+ if ((rect->h % yres_div) > 0)
+ DRM_ERROR("yres(%d) is not division_v(%d) pixel aligned!\n", rect->h, yres_div);
+
+ /*
+ * [NOTE] For rsp3x with single mipi in CMD mode the effective divisors
+ * become xres_div = 1.5, yres_div = 2; in VIDEO mode they become
+ * xres_div = 3, yres_div = 1.
+ */
+ rect->w /= xres_div;
+ rect->h /= yres_div;
+
+ return 0;
+}
+
+static void init_ldi_pxl_div(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *ldi_base;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+
+ u32 ifbc_type = 0;
+ u32 mipi_idx = 0;
+ u32 pxl0_div2_gt_en = 0;
+ u32 pxl0_div4_gt_en = 0;
+ u32 pxl0_divxcfg = 0;
+ u32 pxl0_dsi_gt_en = 0;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
+ ldi_base = ctx->base + DSS_LDI0_OFFSET;
+
+ ifbc_type = IFBC_TYPE_NONE;
+ mipi_idx = 0;
+
+ pxl0_div2_gt_en = g_mipi_ifbc_division[mipi_idx][ifbc_type].pxl0_div2_gt_en;
+ pxl0_div4_gt_en = g_mipi_ifbc_division[mipi_idx][ifbc_type].pxl0_div4_gt_en;
+ pxl0_divxcfg = g_mipi_ifbc_division[mipi_idx][ifbc_type].pxl0_divxcfg;
+ pxl0_dsi_gt_en = g_mipi_ifbc_division[mipi_idx][ifbc_type].pxl0_dsi_gt_en;
+
+ set_reg(ldi_base + LDI_PXL0_DIV2_GT_EN, pxl0_div2_gt_en, 1, 0);
+ set_reg(ldi_base + LDI_PXL0_DIV4_GT_EN, pxl0_div4_gt_en, 1, 0);
+ set_reg(ldi_base + LDI_PXL0_GT_EN, 0x1, 1, 0);
+ set_reg(ldi_base + LDI_PXL0_DSI_GT_EN, pxl0_dsi_gt_en, 2, 0);
+ set_reg(ldi_base + LDI_PXL0_DIVXCFG, pxl0_divxcfg, 3, 0);
+}
+
+void init_other(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+
+ /**
+ * VESA_CLK_SEL is set to 0 for initial,
+ * 1 is needed only by vesa dual pipe compress
+ */
+ set_reg(dss_base + DSS_LDI0_OFFSET + LDI_VESA_CLK_SEL, 0, 1, 0);
+}
+
+void init_ldi(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *ldi_base;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+
+ dss_rect_t rect = {0, 0, 0, 0};
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+ u32 vsync_plr = 0;
+ u32 hsync_plr = 0;
+ u32 pixelclk_plr = 0;
+ u32 data_en_plr = 0;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
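+ /* horizontal/vertical porch and sync widths derived from the DRM mode */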
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ ldi_base = ctx->base + DSS_LDI0_OFFSET;
+
+ rect.x = 0;
+ rect.y = 0;
+ rect.w = mode->hdisplay;
+ rect.h = mode->vdisplay;
+ mipi_ifbc_get_rect(&rect);
+
+ init_ldi_pxl_div(acrtc);
+
+ outp32(ldi_base + LDI_DPI0_HRZ_CTRL0,
+ hfp | ((hbp + DSS_WIDTH(hsw)) << 16));
+ outp32(ldi_base + LDI_DPI0_HRZ_CTRL1, 0);
+ outp32(ldi_base + LDI_DPI0_HRZ_CTRL2, DSS_WIDTH(rect.w));
+ outp32(ldi_base + LDI_VRT_CTRL0,
+ vfp | (vbp << 16));
+ outp32(ldi_base + LDI_VRT_CTRL1, DSS_HEIGHT(vsw));
+ outp32(ldi_base + LDI_VRT_CTRL2, DSS_HEIGHT(rect.h));
+
+ outp32(ldi_base + LDI_PLR_CTRL,
+ vsync_plr | (hsync_plr << 1) |
+ (pixelclk_plr << 2) | (data_en_plr << 3));
+
+ /* bpp*/
+ set_reg(ldi_base + LDI_CTRL, acrtc->out_format, 2, 3);
+ /* bgr*/
+ set_reg(ldi_base + LDI_CTRL, acrtc->bgr_fmt, 1, 13);
+
+ /* for ddr pmqos*/
+ outp32(ldi_base + LDI_VINACT_MSK_LEN, vfp);
+
+ /*cmd event sel*/
+ outp32(ldi_base + LDI_CMD_EVENT_SEL, 0x1);
+
+ /* for 1Hz LCD and mipi command LCD*/
+ set_reg(ldi_base + LDI_DSI_CMD_MOD_CTRL, 0x1, 1, 1);
+
+ /*ldi_data_gate(hisifd, true);*/
+
+#ifdef CONFIG_HISI_FB_LDI_COLORBAR_USED
+ /* colorbar width*/
+ set_reg(ldi_base + LDI_CTRL, DSS_WIDTH(0x3c), 7, 6);
+ /* colorbar ort*/
+ set_reg(ldi_base + LDI_WORK_MODE, 0x0, 1, 1);
+ /* colorbar enable*/
+ set_reg(ldi_base + LDI_WORK_MODE, 0x0, 1, 0);
+#else
+ /* normal*/
+ set_reg(ldi_base + LDI_WORK_MODE, 0x1, 1, 0);
+#endif
+
+ /* ldi disable*/
+ set_reg(ldi_base + LDI_CTRL, 0x0, 1, 0);
+}
+
+void init_dbuf(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ char __iomem *dbuf_base;
+
+ int sram_valid_num = 0;
+ int sram_max_mem_depth = 0;
+ int sram_min_support_depth = 0;
+
+ u32 thd_rqos_in = 0;
+ u32 thd_rqos_out = 0;
+ u32 thd_wqos_in = 0;
+ u32 thd_wqos_out = 0;
+ u32 thd_cg_in = 0;
+ u32 thd_cg_out = 0;
+ u32 thd_wr_wait = 0;
+ u32 thd_cg_hold = 0;
+ u32 thd_flux_req_befdfs_in = 0;
+ u32 thd_flux_req_befdfs_out = 0;
+ u32 thd_flux_req_aftdfs_in = 0;
+ u32 thd_flux_req_aftdfs_out = 0;
+ u32 thd_dfs_ok = 0;
+ u32 dfs_ok_mask = 0;
+ u32 thd_flux_req_sw_en = 1;
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+
+ int dfs_time = 0;
+ int dfs_time_min = 0;
+ int depth = 0;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ dbuf_base = ctx->base + DSS_DBUF0_OFFSET;
+
+ if (mode->hdisplay * mode->vdisplay >= RES_4K_PHONE)
+ dfs_time_min = DFS_TIME_MIN_4K;
+ else
+ dfs_time_min = DFS_TIME_MIN;
+
+ dfs_time = DFS_TIME;
+ depth = DBUF0_DEPTH;
+
+ DRM_DEBUG("dfs_time=%d,\n"
+ "adj_mode->clock=%d\n"
+ "hsw=%d\n"
+ "hbp=%d\n"
+ "hfp=%d\n"
+ "mode->hdisplay=%d\n"
+ "mode->vdisplay=%d\n",
+ dfs_time,
+ adj_mode->clock,
+ hsw,
+ hbp,
+ hfp,
+ mode->hdisplay,
+ mode->vdisplay);
+
+ /*
+ ** int K = 0;
+ ** int Tp = 1000000 / adj_mode->clock;
+ ** K = (hsw + hbp + mode->hdisplay +
+ ** hfp) / mode->hdisplay;
+ ** thd_cg_out = dfs_time / (Tp * K * 6);
+ */
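+ /*
+ * Integer version of the derivation above: everything is multiplied out
+ * before the final division so that intermediate truncation is avoided.
+ * Note that adj_mode->clock is expressed in kHz.
+ */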
+ thd_cg_out = (dfs_time * adj_mode->clock * 1000UL * mode->hdisplay) /
+ (((hsw + hbp + hfp) + mode->hdisplay) * 6 * 1000000UL);
+
+ sram_valid_num = thd_cg_out / depth;
+ thd_cg_in = (sram_valid_num + 1) * depth - 1;
+
+ sram_max_mem_depth = (sram_valid_num + 1) * depth;
+
+ thd_rqos_in = thd_cg_out * 85 / 100;
+ thd_rqos_out = thd_cg_out;
+ thd_flux_req_befdfs_in = GET_FLUX_REQ_IN(sram_max_mem_depth);
+ thd_flux_req_befdfs_out = GET_FLUX_REQ_OUT(sram_max_mem_depth);
+
+ sram_min_support_depth = dfs_time_min * mode->hdisplay / (1000000 / 60 / (mode->vdisplay +
+ vbp + vfp + vsw) * (DBUF_WIDTH_BIT / 3 / BITS_PER_BYTE));
+
+ /*thd_flux_req_aftdfs_in =[(sram_valid_num+1)*depth - 50*HSIZE/((1000000/60/(VSIZE+VFP+VBP+VSW))*6)]/3*/
+ thd_flux_req_aftdfs_in = (sram_max_mem_depth - sram_min_support_depth) / 3;
+ /*thd_flux_req_aftdfs_out = 2*[(sram_valid_num+1)* depth - 50*HSIZE/((1000000/60/(VSIZE+VFP+VBP+VSW))*6)]/3*/
+ thd_flux_req_aftdfs_out = 2 * (sram_max_mem_depth - sram_min_support_depth) / 3;
+
+ thd_dfs_ok = thd_flux_req_befdfs_in;
+
+ DRM_DEBUG("hdisplay=%d\n"
+ "vdisplay=%d\n"
+ "sram_valid_num=%d,\n"
+ "thd_rqos_in=0x%x\n"
+ "thd_rqos_out=0x%x\n"
+ "thd_cg_in=0x%x\n"
+ "thd_cg_out=0x%x\n"
+ "thd_flux_req_befdfs_in=0x%x\n"
+ "thd_flux_req_befdfs_out=0x%x\n"
+ "thd_flux_req_aftdfs_in=0x%x\n"
+ "thd_flux_req_aftdfs_out=0x%x\n"
+ "thd_dfs_ok=0x%x\n",
+ mode->hdisplay,
+ mode->vdisplay,
+ sram_valid_num,
+ thd_rqos_in,
+ thd_rqos_out,
+ thd_cg_in,
+ thd_cg_out,
+ thd_flux_req_befdfs_in,
+ thd_flux_req_befdfs_out,
+ thd_flux_req_aftdfs_in,
+ thd_flux_req_aftdfs_out,
+ thd_dfs_ok);
+
+ outp32(dbuf_base + DBUF_FRM_SIZE, mode->hdisplay * mode->vdisplay);
+ outp32(dbuf_base + DBUF_FRM_HSIZE, DSS_WIDTH(mode->hdisplay));
+ outp32(dbuf_base + DBUF_SRAM_VALID_NUM, sram_valid_num);
+
+ outp32(dbuf_base + DBUF_THD_RQOS, (thd_rqos_out << 16) | thd_rqos_in);
+ outp32(dbuf_base + DBUF_THD_WQOS, (thd_wqos_out << 16) | thd_wqos_in);
+ outp32(dbuf_base + DBUF_THD_CG, (thd_cg_out << 16) | thd_cg_in);
+ outp32(dbuf_base + DBUF_THD_OTHER, (thd_cg_hold << 16) | thd_wr_wait);
+ outp32(dbuf_base + DBUF_THD_FLUX_REQ_BEF, (thd_flux_req_befdfs_out << 16) | thd_flux_req_befdfs_in);
+ outp32(dbuf_base + DBUF_THD_FLUX_REQ_AFT, (thd_flux_req_aftdfs_out << 16) | thd_flux_req_aftdfs_in);
+ outp32(dbuf_base + DBUF_THD_DFS_OK, thd_dfs_ok);
+ outp32(dbuf_base + DBUF_FLUX_REQ_CTRL, (dfs_ok_mask << 1) | thd_flux_req_sw_en);
+
+ outp32(dbuf_base + DBUF_DFS_LP_CTRL, 0x1);
+}
+
+void init_dpp(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ char __iomem *dpp_base;
+ char __iomem *mctl_sys_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
+ dpp_base = ctx->base + DSS_DPP_OFFSET;
+ mctl_sys_base = ctx->base + DSS_MCTRL_SYS_OFFSET;
+
+ outp32(dpp_base + DPP_IMG_SIZE_BEF_SR,
+ (DSS_HEIGHT(mode->vdisplay) << 16) | DSS_WIDTH(mode->hdisplay));
+ outp32(dpp_base + DPP_IMG_SIZE_AFT_SR,
+ (DSS_HEIGHT(mode->vdisplay) << 16) | DSS_WIDTH(mode->hdisplay));
+
+#ifdef CONFIG_HISI_FB_DPP_COLORBAR_USED
+ void __iomem *mctl_base;
+ outp32(dpp_base + DPP_CLRBAR_CTRL, (0x30 << 24) |(0 << 1) | 0x1);
+ set_reg(dpp_base + DPP_CLRBAR_1ST_CLR, 0xFF, 8, 16);
+ set_reg(dpp_base + DPP_CLRBAR_2ND_CLR, 0xFF, 8, 8);
+ set_reg(dpp_base + DPP_CLRBAR_3RD_CLR, 0xFF, 8, 0);
+
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_MCTL_BASE];
+
+ set_reg(mctl_base + MCTL_CTL_MUTEX, 0x1, 1, 0);
+ set_reg(mctl_base + MCTL_CTL_EN, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_TOP, 0x2, 32, 0); /*auto mode*/
+ set_reg(mctl_base + MCTL_CTL_DBG, 0xB13A00, 32, 0);
+
+ set_reg(mctl_base + MCTL_CTL_MUTEX_ITF, 0x1, 2, 0);
+ set_reg(mctl_sys_base + MCTL_OV0_FLUSH_EN, 0x8, 4, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX, 0x0, 1, 0);
+#endif
+}
+
+void enable_ldi(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *ldi_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ ldi_base = ctx->base + DSS_LDI0_OFFSET;
+
+ /* ldi enable */
+ set_reg(ldi_base + LDI_CTRL, 0x1, 1, 0);
+}
+
+void disable_ldi(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *ldi_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ ldi_base = ctx->base + DSS_LDI0_OFFSET;
+
+ /* ldi disable */
+ set_reg(ldi_base + LDI_CTRL, 0x0, 1, 0);
+}
+
+void dpe_interrupt_clear(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+ u32 clear;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+
+ clear = ~0;
+ outp32(dss_base + GLB_CPU_PDP_INTS, clear);
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INTS, clear);
+ outp32(dss_base + DSS_DPP_OFFSET + DPP_INTS, clear);
+
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_MCTL_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_WCH0_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_WCH1_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH0_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH1_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH2_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH3_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH4_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH5_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH6_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH7_INTS, clear);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_DSS_GLB_INTS, clear);
+}
+
+void dpe_interrupt_unmask(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+ u32 unmask;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+
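+ /* a 0 bit in the *_INT_MSK registers enables the corresponding source */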
+ unmask = ~0;
+ unmask &= ~(BIT_DPP_INTS | BIT_ITF0_INTS | BIT_MMU_IRPT_NS);
+ outp32(dss_base + GLB_CPU_PDP_INT_MSK, unmask);
+
+ unmask = ~0;
+ unmask &= ~(BIT_VSYNC | BIT_VACTIVE0_END | BIT_LDI_UNFLOW);
+
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK, unmask);
+}
+
+void dpe_interrupt_mask(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+ u32 mask;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+
+ mask = ~0;
+ outp32(dss_base + GLB_CPU_PDP_INT_MSK, mask);
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK, mask);
+ outp32(dss_base + DSS_DPP_OFFSET + DPP_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_DSS_GLB_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_MCTL_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_WCH0_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_WCH1_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH0_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH1_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH2_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH3_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH4_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH5_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH6_INT_MSK, mask);
+ outp32(dss_base + DSS_DBG_OFFSET + DBG_RCH7_INT_MSK, mask);
+}
+
+int dpe_init(struct dss_crtc *acrtc)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
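+ /*
+ * Program the data buffer, post-processing (DPP), misc and LDI timing
+ * blocks first, then power up the DSS/MCTL path and configure the overlay
+ * base before finally enabling the LDI output.
+ */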
+ init_dbuf(acrtc);
+ init_dpp(acrtc);
+ init_other(acrtc);
+ init_ldi(acrtc);
+
+ hisifb_dss_on(acrtc->ctx);
+ hisi_dss_mctl_on(acrtc->ctx);
+
+ hisi_dss_mctl_mutex_lock(acrtc->ctx);
+
+ hisi_dss_ovl_base_config(acrtc->ctx, mode->hdisplay, mode->vdisplay);
+
+ hisi_dss_mctl_mutex_unlock(acrtc->ctx);
+
+ enable_ldi(acrtc);
+
+ mdelay(60);
+
+ return 0;
+}
+
+void dss_inner_clk_pdp_enable(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+ dss_base = ctx->base;
+
+ outp32(dss_base + DSS_IFBC_OFFSET + IFBC_MEM_CTRL, 0x00000088);
+ outp32(dss_base + DSS_DSC_OFFSET + DSC_MEM_CTRL, 0x00000888);
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_MEM_CTRL, 0x00000008);
+ outp32(dss_base + DSS_DBUF0_OFFSET + DBUF_MEM_CTRL, 0x00000008);
+ outp32(dss_base + DSS_DPP_DITHER_OFFSET + DITHER_MEM_CTRL, 0x00000008);
+}
+
+void dss_inner_clk_common_enable(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+ char __iomem *dss_base;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+ dss_base = ctx->base;
+
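+ /*
+ * Per-module MEM_CTRL setup for the read/write channels (scaler line
+ * buffers, DMA buffers, AFBC encoders/decoders and rotators).
+ */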
+ /*core/axi/mmbuf*/
+ outp32(dss_base + DSS_CMDLIST_OFFSET + CMD_MEM_CTRL, 0x00000008); /*cmd mem*/
+
+ outp32(dss_base + DSS_RCH_VG0_SCL_OFFSET + SCF_COEF_MEM_CTRL, 0x00000088);/*rch_v0 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG0_SCL_OFFSET + SCF_LB_MEM_CTRL, 0x00000008);/*rch_v0 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG0_ARSR_OFFSET + ARSR2P_LB_MEM_CTRL, 0x00000008);/*rch_v0 ,arsr2p mem*/
+ outp32(dss_base + DSS_RCH_VG0_DMA_OFFSET + VPP_MEM_CTRL, 0x00000008);/*rch_v0 ,vpp mem*/
+ outp32(dss_base + DSS_RCH_VG0_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_v0 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_VG0_DMA_OFFSET + AFBCD_MEM_CTRL, 0x00008888);/*rch_v0 ,afbcd mem*/
+
+ outp32(dss_base + DSS_RCH_VG1_SCL_OFFSET + SCF_COEF_MEM_CTRL, 0x00000088);/*rch_v1 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG1_SCL_OFFSET + SCF_LB_MEM_CTRL, 0x00000008);/*rch_v1 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG1_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_v1 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_VG1_DMA_OFFSET + AFBCD_MEM_CTRL, 0x00008888);/*rch_v1 ,afbcd mem*/
+
+ outp32(dss_base + DSS_RCH_VG2_SCL_OFFSET + SCF_COEF_MEM_CTRL, 0x00000088);/*rch_v2 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG2_SCL_OFFSET + SCF_LB_MEM_CTRL, 0x00000008);/*rch_v2 ,scf mem*/
+ outp32(dss_base + DSS_RCH_VG2_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_v2 ,dma_buf mem*/
+
+ outp32(dss_base + DSS_RCH_G0_SCL_OFFSET + SCF_COEF_MEM_CTRL, 0x00000088);/*rch_g0 ,scf mem*/
+ outp32(dss_base + DSS_RCH_G0_SCL_OFFSET + SCF_LB_MEM_CTRL, 0x00000008);/*rch_g0 ,scf mem*/
+ outp32(dss_base + DSS_RCH_G0_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_g0 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_G0_DMA_OFFSET + AFBCD_MEM_CTRL, 0x00008888);/*rch_g0 ,afbcd mem*/
+
+ outp32(dss_base + DSS_RCH_G1_SCL_OFFSET + SCF_COEF_MEM_CTRL, 0x00000088);/*rch_g1 ,scf mem*/
+ outp32(dss_base + DSS_RCH_G1_SCL_OFFSET + SCF_LB_MEM_CTRL, 0x00000008);/*rch_g1 ,scf mem*/
+ outp32(dss_base + DSS_RCH_G1_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_g1 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_G1_DMA_OFFSET + AFBCD_MEM_CTRL, 0x00008888);/*rch_g1 ,afbcd mem*/
+
+ outp32(dss_base + DSS_RCH_D0_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_d0 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_D0_DMA_OFFSET + AFBCD_MEM_CTRL, 0x00008888);/*rch_d0 ,afbcd mem*/
+ outp32(dss_base + DSS_RCH_D1_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_d1 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_D2_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_d2 ,dma_buf mem*/
+ outp32(dss_base + DSS_RCH_D3_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*rch_d3 ,dma_buf mem*/
+
+ outp32(dss_base + DSS_WCH0_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*wch0 DMA/AFBCE mem*/
+ outp32(dss_base + DSS_WCH0_DMA_OFFSET + AFBCE_MEM_CTRL, 0x00000888);/*wch0 DMA/AFBCE mem*/
+ outp32(dss_base + DSS_WCH0_DMA_OFFSET + ROT_MEM_CTRL, 0x00000008);/*wch0 rot mem*/
+ outp32(dss_base + DSS_WCH1_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*wch1 DMA/AFBCE mem*/
+ outp32(dss_base + DSS_WCH1_DMA_OFFSET + AFBCE_MEM_CTRL, 0x00000888);/*wch1 DMA/AFBCE mem*/
+ outp32(dss_base + DSS_WCH1_DMA_OFFSET + ROT_MEM_CTRL, 0x00000008);/*wch1 rot mem*/
+ outp32(dss_base + DSS_WCH2_DMA_OFFSET + DMA_BUF_MEM_CTRL, 0x00000008);/*wch2 DMA/AFBCE mem*/
+ outp32(dss_base + DSS_WCH2_DMA_OFFSET + ROT_MEM_CTRL, 0x00000008);/*wch2 rot mem*/
+}
+int dpe_irq_enable(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ if (ctx->irq)
+ enable_irq(ctx->irq);
+
+ return 0;
+}
+
+int dpe_irq_disable(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx;
+
+ ctx = acrtc->ctx;
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ if (ctx->irq)
+ disable_irq(ctx->irq);
+
+ /*disable_irq_nosync(ctx->irq);*/
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.h b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.h
new file mode 100644
index 000000000000..7ee992273d72
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dpe_utils.h
@@ -0,0 +1,58 @@
+/* Copyright (c) 2013-2014, Hisilicon Tech. Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef KIRIN_DRM_DPE_UTILS_H
+#define KIRIN_DRM_DPE_UTILS_H
+
+#include "kirin_dpe_reg.h"
+
+/*#define CONFIG_HISI_FB_OV_BASE_USED*/
+/*#define CONFIG_HISI_FB_DPP_COLORBAR_USED*/
+/*#define CONFIG_HISI_FB_LDI_COLORBAR_USED*/
+
+void set_reg(char __iomem *addr, uint32_t val, uint8_t bw, uint8_t bs);
+
+void init_dbuf(struct dss_crtc *acrtc);
+void init_dpp(struct dss_crtc *acrtc);
+void init_other(struct dss_crtc *acrtc);
+void init_ldi(struct dss_crtc *acrtc);
+
+void deinit_ldi(struct dss_crtc *acrtc);
+void enable_ldi(struct dss_crtc *acrtc);
+void disable_ldi(struct dss_crtc *acrtc);
+
+void dss_inner_clk_pdp_enable(struct dss_crtc *acrtc);
+void dss_inner_clk_common_enable(struct dss_crtc *acrtc);
+void dpe_interrupt_clear(struct dss_crtc *acrtc);
+void dpe_interrupt_unmask(struct dss_crtc *acrtc);
+void dpe_interrupt_mask(struct dss_crtc *acrtc);
+
+int dpe_irq_enable(struct dss_crtc *acrtc);
+int dpe_irq_disable(struct dss_crtc *acrtc);
+
+int dpe_init(struct dss_crtc *acrtc);
+
+void hisifb_dss_on(struct dss_hw_ctx *ctx);
+void hisi_dss_mctl_on(struct dss_hw_ctx *ctx);
+
+void hisi_dss_unflow_handler(struct dss_hw_ctx *ctx, bool unmask);
+int hisi_dss_mctl_mutex_lock(struct dss_hw_ctx *ctx);
+int hisi_dss_mctl_mutex_unlock(struct dss_hw_ctx *ctx);
+int hisi_dss_ovl_base_config(struct dss_hw_ctx *ctx, u32 xres, u32 yres);
+
+void hisi_fb_pan_display(struct drm_plane *plane);
+void hisi_dss_online_play(struct drm_plane *plane, drm_dss_layer_t *layer);
+
+u32 dss_get_format(u32 pixel_format);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.c
new file mode 100644
index 000000000000..ffa0cd792bf1
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.c
@@ -0,0 +1,379 @@
+/*
+ * Hisilicon Kirin SoCs drm master driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * <cailiwei@hisilicon.com>
+ * <zhengwanchun@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "kirin_drm_drv.h"
+
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static bool fbdev = true;
+MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
+module_param(fbdev, bool, 0600);
+#endif
+
+
+static struct kirin_dc_ops *dc_ops;
+
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+
+ if (priv->fbdev) {
+ kirin_drm_fbdev_fini(dev);
+ priv->fbdev = NULL;
+ }
+
+ drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ dc_ops->cleanup(dev);
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+
+ dsi_set_output_client(dev);
+
+ if (priv->fbdev)
+ drm_fb_helper_hotplug_event(priv->fbdev);
+ else
+ priv->fbdev = kirin_drm_fbdev_init(dev);
+}
+
+static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = kirin_fbdev_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void kirin_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+}
+
+static int kirin_drm_kms_init(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev->dev_private = priv;
+ dev_set_drvdata(dev->dev, dev);
+
+ /* dev->mode_config initialization */
+ drm_mode_config_init(dev);
+ kirin_drm_mode_config_init(dev);
+
+ /* display controller init */
+ ret = dc_ops->init(dev);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* bind and init sub drivers */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret) {
+ DRM_ERROR("failed to bind all components.\n");
+ goto err_dc_cleanup;
+ }
+
+ /* vblank init */
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank.\n");
+ goto err_unbind_all;
+ }
+ /* with irq_enabled = true, we can use the vblank feature. */
+ dev->irq_enabled = true;
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ //if (fbdev)
+ // priv->fbdev = kirin_drm_fbdev_init(dev);
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
+ /* force detection after connectors init */
+ (void)drm_helper_hpd_irq_event(dev);
+
+ return 0;
+
+err_unbind_all:
+ component_unbind_all(dev->dev, dev);
+err_dc_cleanup:
+ dc_ops->cleanup(dev);
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return ret;
+}
+
+static const struct file_operations kirin_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static int kirin_gem_cma_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+static int kirin_drm_connectors_register(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct drm_connector *failed_connector;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ failed_connector = connector;
+ goto err;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+
+err:
+ drm_for_each_connector(connector, dev) {
+ if (failed_connector == connector)
+ break;
+ drm_connector_unregister(connector);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static struct drm_driver kirin_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC | DRIVER_RENDER,
+ .fops = &kirin_drm_fops,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = kirin_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = "kirin",
+ .desc = "Hisilicon Kirin SoCs' DRM Driver",
+ .date = "20170309",
+ .major = 1,
+ .minor = 0,
+};
+
+#ifdef CONFIG_OF
+/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
+ * (or probably any other).. so probably some room for some helpers
+ */
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+#else
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+#endif
+
+static int kirin_drm_bind(struct device *dev)
+{
+ struct drm_driver *driver = &kirin_drm_driver;
+ struct drm_device *drm_dev;
+ int ret;
+
+ //drm_platform_init(&kirin_drm_driver, to_platform_device(dev));
+
+ drm_dev = drm_dev_alloc(driver, dev);
+ if (!drm_dev)
+ return -ENOMEM;
+
+ drm_dev->platformdev = to_platform_device(dev);
+
+ ret = kirin_drm_kms_init(drm_dev);
+ if (ret)
+ goto err_drm_dev_unref;
+
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto err_kms_cleanup;
+
+ /* connectors should be registered after drm device register */
+ ret = kirin_drm_connectors_register(drm_dev);
+ if (ret)
+ goto err_drm_dev_unregister;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm_dev->primary->index);
+
+ return 0;
+
+err_drm_dev_unregister:
+ drm_dev_unregister(drm_dev);
+err_kms_cleanup:
+ kirin_drm_kms_cleanup(drm_dev);
+err_drm_dev_unref:
+ drm_dev_unref(drm_dev);
+
+ return ret;
+}
+
+static void kirin_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops kirin_drm_ops = {
+ .bind = kirin_drm_bind,
+ .unbind = kirin_drm_unbind,
+};
+
+static struct device_node *kirin_get_remote_node(struct device_node *np)
+{
+ struct device_node *endpoint, *remote;
+
+ /* get the first endpoint, in our case only one remote node
+ * is connected to display controller.
+ */
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("no valid endpoint node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!remote) {
+ DRM_ERROR("no valid remote node\n");
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(remote);
+
+ if (!of_device_is_available(remote)) {
+ DRM_ERROR("remote node is not available\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ return remote;
+}
+
+static int kirin_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct component_match *match = NULL;
+ struct device_node *remote;
+
+ dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
+ if (!dc_ops) {
+ DRM_ERROR("failed to get dt id data\n");
+ return -EINVAL;
+ }
+
+ remote = kirin_get_remote_node(np);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
+ component_match_add(dev, &match, compare_of, remote);
+
+ return component_master_add_with_match(dev, &kirin_drm_ops, match);
+}
+
+static int kirin_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &kirin_drm_ops);
+ dc_ops = NULL;
+ return 0;
+}
+
+static const struct of_device_id kirin_drm_dt_ids[] = {
+ { .compatible = "hisilicon,hi3660-dpe",
+ .data = &dss_dc_ops,
+ },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
+
+static struct platform_driver kirin_drm_platform_driver = {
+ .probe = kirin_drm_platform_probe,
+ .remove = kirin_drm_platform_remove,
+ .driver = {
+ .name = "kirin-drm",
+ .of_match_table = kirin_drm_dt_ids,
+ },
+};
+
+module_platform_driver(kirin_drm_platform_driver);
+
+MODULE_AUTHOR("cailiwei <cailiwei@hisilicon.com>");
+MODULE_AUTHOR("zhengwanchun <zhengwanchun@hisilicon.com>");
+MODULE_DESCRIPTION("hisilicon Kirin SoCs' DRM master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.h
new file mode 100644
index 000000000000..2f842ad36ae9
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_drv.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_DRM_DRV_H__
+#define __KIRIN_DRM_DRV_H__
+
+#include <drm/drmP.h>
+#include <linux/iommu.h>
+#include <linux/ion.h>
+#include <linux/hisi/hisi_ion.h>
+#include <linux/hisi/hisi-iommu.h>
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+#define MAX_CRTC 2
+
+#define to_kirin_fbdev(x) container_of(x, struct kirin_fbdev, fb_helper)
+
+/* display controller init/cleanup ops */
+struct kirin_dc_ops {
+ int (*init)(struct drm_device *dev);
+ void (*cleanup)(struct drm_device *dev);
+};
+
+struct kirin_drm_private {
+ struct drm_fb_helper *fb_helper;
+ struct drm_fb_helper *fbdev;
+ struct drm_crtc *crtc[MAX_CRTC];
+};
+
+struct kirin_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct drm_framebuffer *fb;
+
+ struct ion_client *ion_client;
+ struct ion_handle *ion_handle;
+ struct iommu_map_format iommu_format;
+ void *screen_base;
+ unsigned long smem_start;
+ unsigned long screen_size;
+ int shared_fd;
+};
+
+extern const struct kirin_dc_ops dss_dc_ops;
+extern void dsi_set_output_client(struct drm_device *dev);
+
+struct drm_framebuffer *kirin_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_fb_helper *kirin_drm_fbdev_init(struct drm_device *dev);
+void kirin_drm_fbdev_fini(struct drm_device *dev);
+
+
+#endif /* __KIRIN_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dss.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dss.c
new file mode 100644
index 000000000000..e99a17270fc0
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_dss.c
@@ -0,0 +1,696 @@
+/*
+ * Hisilicon Hi3660 SoC DSS (Display Subsystem) crtc&plane driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <video/display_timing.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "kirin_drm_drv.h"
+
+#include "kirin_drm_dpe_utils.h"
+#include "kirin_dpe_reg.h"
+
+#define DTS_COMP_DSS_NAME "hisilicon,hi3660-dpe"
+
+#define DSS_DEBUG 0
+
+static const struct dss_format dss_formats[] = {
+ /* 16bpp RGB: */
+ { DRM_FORMAT_RGB565, HISI_FB_PIXEL_FORMAT_RGB_565 },
+ { DRM_FORMAT_BGR565, HISI_FB_PIXEL_FORMAT_BGR_565 },
+ /* 32bpp [A]RGB: */
+ { DRM_FORMAT_XRGB8888, HISI_FB_PIXEL_FORMAT_RGBX_8888 },
+ { DRM_FORMAT_XBGR8888, HISI_FB_PIXEL_FORMAT_BGRX_8888 },
+ { DRM_FORMAT_RGBA8888, HISI_FB_PIXEL_FORMAT_RGBA_8888 },
+ { DRM_FORMAT_BGRA8888, HISI_FB_PIXEL_FORMAT_BGRA_8888 },
+ /*{ DRM_FORMAT_ARGB8888, },*/
+ /*{ DRM_FORMAT_ABGR8888, },*/
+};
+
+static const u32 channel_formats1[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888
+};
+
+u32 dss_get_channel_formats(u8 ch, const u32 **formats)
+{
+ switch (ch) {
+ case DSS_CH1:
+ *formats = channel_formats1;
+ return ARRAY_SIZE(channel_formats1);
+ default:
+ DRM_ERROR("no such channel %d\n", ch);
+ *formats = NULL;
+ return 0;
+ }
+}
+
+/* convert from fourcc format to dss format */
+u32 dss_get_format(u32 pixel_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_formats); i++)
+ if (dss_formats[i].pixel_format == pixel_format)
+ return dss_formats[i].dss_format;
+
+ /* not found */
+ DRM_ERROR("unsupported pixel format, fourcc = %d\n",
+ pixel_format);
+ return HISI_FB_PIXEL_FORMAT_UNSUPPORT;
+}
+
+/* CRTC implementation */
+static void dss_ldi_set_mode(struct dss_crtc *acrtc)
+{
+ int ret;
+ u32 clk_Hz;
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+ struct drm_display_mode *mode = &acrtc->base.state->mode;
+ struct drm_display_mode *adj_mode = &acrtc->base.state->adjusted_mode;
+
+
+ DRM_INFO("mode->clock(org) = %u\n", mode->clock);
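+ /*
+ * The pxl0 clock is capped at DSS_MAX_PXL0_CLK_144M (see dss_dts_parse()),
+ * so a few standard pixel clocks are remapped to rates the clock tree can
+ * presumably provide; adj_mode->clock is updated below with the rate that
+ * was actually achieved.
+ */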
+ if (mode->clock == 148500) {
+ clk_Hz = 144000 * 1000UL;
+ } else if (mode->clock == 83496) {
+ clk_Hz = 80000 * 1000UL;
+ } else if (mode->clock == 74440) {
+ clk_Hz = 72000 * 1000UL;
+ } else if (mode->clock == 74250) {
+ clk_Hz = 72000 * 1000UL;
+ } else {
+ clk_Hz = mode->clock * 1000UL;
+ }
+
+ /*
+ * Success should be guaranteed in mode_valid call back,
+ * so failure shouldn't happen here
+ */
+ ret = clk_set_rate(ctx->dss_pxl0_clk, clk_Hz);
+ if (ret) {
+ DRM_ERROR("failed to set pixel clk %dHz (%d)\n", clk_Hz, ret);
+ }
+ adj_mode->clock = clk_get_rate(ctx->dss_pxl0_clk) / 1000;
+ DRM_INFO("dss_pxl0_clk = %u\n", adj_mode->clock);
+
+ dpe_init(acrtc);
+}
+
+static int dss_power_up(struct dss_crtc *acrtc)
+{
+ int ret;
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ ret = clk_prepare_enable(ctx->dss_pxl0_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable dss_pxl0_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_pri_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable dss_pri_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_pclk_dss_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable dss_pclk_dss_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_axi_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable dss_axi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->dss_mmbuf_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable dss_mmbuf_clk (%d)\n", ret);
+ return ret;
+ }
+ dss_inner_clk_pdp_enable(acrtc);
+ dss_inner_clk_common_enable(acrtc);
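+
+ /*
+ * Mask and clear any stale interrupt status before enabling the IRQ line,
+ * then unmask only the sources the driver handles.
+ */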
+ dpe_interrupt_mask(acrtc);
+ dpe_interrupt_clear(acrtc);
+ dpe_irq_enable(acrtc);
+ dpe_interrupt_unmask(acrtc);
+
+ ctx->power_on = true;
+ return 0;
+}
+
+#if 0
+static void dss_power_down(struct dss_crtc *acrtc)
+{
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ dpe_interrupt_mask(acrtc);
+ dpe_irq_disable(acrtc);
+
+ ctx->power_on = false;
+}
+#endif
+
+static int dss_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct dss_crtc *acrtc = to_dss_crtc(priv->crtc[pipe]);
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ if (!ctx->power_on)
+ (void)dss_power_up(acrtc);
+
+ return 0;
+}
+
+static void dss_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct dss_crtc *acrtc = to_dss_crtc(priv->crtc[pipe]);
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ if (!ctx->power_on) {
+ DRM_ERROR("power is down! vblank disable fail\n");
+ return;
+ }
+}
+
+static irqreturn_t dss_irq_handler(int irq, void *data)
+{
+ struct dss_crtc *acrtc = data;
+ struct drm_crtc *crtc = &acrtc->base;
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *dss_base = ctx->base;
+
+ u32 isr_s1 = 0;
+ u32 isr_s2 = 0;
+ u32 isr_s2_dpp = 0;
+ u32 isr_s2_smmu = 0;
+ u32 mask = 0;
+
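+ /*
+ * Latch the raw status registers, acknowledge them, then drop any sources
+ * that are currently masked before deciding what to handle.
+ */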
+ isr_s1 = inp32(dss_base + GLB_CPU_PDP_INTS);
+ isr_s2 = inp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INTS);
+ isr_s2_dpp = inp32(dss_base + DSS_DPP_OFFSET + DPP_INTS);
+ isr_s2_smmu = inp32(dss_base + DSS_SMMU_OFFSET + SMMU_INTSTAT_NS);
+
+ outp32(dss_base + DSS_SMMU_OFFSET + SMMU_INTCLR_NS, isr_s2_smmu);
+ outp32(dss_base + DSS_DPP_OFFSET + DPP_INTS, isr_s2_dpp);
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INTS, isr_s2);
+ outp32(dss_base + GLB_CPU_PDP_INTS, isr_s1);
+
+ isr_s1 &= ~(inp32(dss_base + GLB_CPU_PDP_INT_MSK));
+ isr_s2 &= ~(inp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK));
+ isr_s2_dpp &= ~(inp32(dss_base + DSS_DPP_OFFSET + DPP_INT_MSK));
+
+ if (isr_s2 & BIT_VACTIVE0_END) {
+ ctx->vactive0_end_flag++;
+ wake_up_interruptible_all(&ctx->vactive0_end_wq);
+ }
+
+ if (isr_s2 & BIT_VSYNC) {
+ ctx->vsync_timestamp = ktime_get();
+ drm_crtc_handle_vblank(crtc);
+ }
+
+ if (isr_s2 & BIT_LDI_UNFLOW) {
+ mask = inp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK);
+ mask |= BIT_LDI_UNFLOW;
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK, mask);
+
+ DRM_ERROR("ldi underflow!\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void dss_crtc_enable(struct drm_crtc *crtc)
+{
+ struct dss_crtc *acrtc = to_dss_crtc(crtc);
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+ int ret;
+
+ if (acrtc->enable)
+ return;
+
+ if (!ctx->power_on) {
+ ret = dss_power_up(acrtc);
+ if (ret)
+ return;
+ }
+
+ acrtc->enable = true;
+ drm_crtc_vblank_on(crtc);
+}
+
+static void dss_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dss_crtc *acrtc = to_dss_crtc(crtc);
+
+ if (!acrtc->enable)
+ return;
+
+ /*dss_power_down(acrtc);*/
+ acrtc->enable = false;
+ drm_crtc_vblank_off(crtc);
+}
+
+static void dss_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct dss_crtc *acrtc = to_dss_crtc(crtc);
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ if (!ctx->power_on)
+ (void)dss_power_up(acrtc);
+ dss_ldi_set_mode(acrtc);
+}
+
+static void dss_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dss_crtc *acrtc = to_dss_crtc(crtc);
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ if (!ctx->power_on)
+ (void)dss_power_up(acrtc);
+}
+
+static void dss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+
+{
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+
+}
+
+static const struct drm_crtc_helper_funcs dss_crtc_helper_funcs = {
+ .enable = dss_crtc_enable,
+ .disable = dss_crtc_disable,
+ .mode_set_nofb = dss_crtc_mode_set_nofb,
+ .atomic_begin = dss_crtc_atomic_begin,
+ .atomic_flush = dss_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs dss_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int dss_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *plane)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct device_node *port;
+ int ret;
+
+ /* set crtc port so that
+ * drm_of_find_possible_crtcs call works
+ */
+ port = of_get_child_by_name(dev->dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %s\n",
+ dev->dev->of_node->full_name);
+ return -EINVAL;
+ }
+ of_node_put(port);
+ crtc->port = port;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &dss_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc.\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &dss_crtc_helper_funcs);
+ priv->crtc[drm_crtc_index(crtc)] = crtc;
+
+ return 0;
+}
+
+static int dss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+ int crtc_x = state->crtc_x;
+ int crtc_y = state->crtc_y;
+ u32 crtc_w = state->crtc_w;
+ u32 crtc_h = state->crtc_h;
+ u32 fmt;
+
+ if (!crtc || !fb)
+ return 0;
+
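+ /*
+ * Reject scaling, source windows that fall outside the framebuffer and
+ * destination windows that fall outside the adjusted display mode.
+ */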
+ fmt = dss_get_format(fb->pixel_format);
+ if (fmt == HISI_FB_PIXEL_FORMAT_UNSUPPORT)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != crtc_w || src_h != crtc_h) {
+ DRM_ERROR("scaling is not supported\n");
+ return -EINVAL;
+ }
+
+ if (src_x + src_w > fb->width ||
+ src_y + src_h > fb->height)
+ return -EINVAL;
+
+ if (crtc_x < 0 || crtc_y < 0)
+ return -EINVAL;
+
+ if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
+ crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ hisi_fb_pan_display(plane);
+}
+
+static void dss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ //struct dss_plane *aplane = to_dss_plane(plane);
+}
+
+static const struct drm_plane_helper_funcs dss_plane_helper_funcs = {
+ .atomic_check = dss_plane_atomic_check,
+ .atomic_update = dss_plane_atomic_update,
+ .atomic_disable = dss_plane_atomic_disable,
+};
+
+static struct drm_plane_funcs dss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int dss_plane_init(struct drm_device *dev, struct dss_plane *aplane,
+ enum drm_plane_type type)
+{
+ const u32 *fmts;
+ u32 fmts_cnt;
+ int ret = 0;
+
+ /* get properties */
+ fmts_cnt = dss_get_channel_formats(aplane->ch, &fmts);
+ if (!fmts_cnt)
+ return -EINVAL;
+
+ ret = drm_universal_plane_init(dev, &aplane->base, 1, &dss_plane_funcs,
+ fmts, fmts_cnt, type, NULL);
+ if (ret) {
+ DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
+ return ret;
+ }
+
+ drm_plane_helper_add(&aplane->base, &dss_plane_helper_funcs);
+
+ return 0;
+}
+
+static int dss_enable_iommu(struct platform_device *pdev, struct dss_hw_ctx *ctx)
+{
+ struct device *dev = NULL;
+
+ dev = &pdev->dev;
+
+ /* create iommu domain */
+ ctx->mmu_domain = iommu_domain_alloc(dev->bus);
+ if (!ctx->mmu_domain) {
+ pr_err("iommu_domain_alloc failed!\n");
+ return -EINVAL;
+ }
+
+ iommu_attach_device(ctx->mmu_domain, dev);
+
+ return 0;
+}
+
+static int dss_dts_parse(struct platform_device *pdev, struct dss_hw_ctx *ctx)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = NULL;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, DTS_COMP_DSS_NAME);
+ if (!np) {
+ DRM_ERROR("device node %s not found!\n",
+ DTS_COMP_DSS_NAME);
+ return -ENXIO;
+ }
+
+ ctx->base = of_iomap(np, 0);
+ if (!ctx->base) {
+ DRM_ERROR("failed to get dss base resource.\n");
+ return -ENXIO;
+ }
+
+ ctx->peri_crg_base = of_iomap(np, 1);
+ if (!ctx->peri_crg_base) {
+ DRM_ERROR("failed to get dss peri_crg_base resource.\n");
+ return -ENXIO;
+ }
+
+ ctx->sctrl_base = of_iomap(np, 2);
+ if (!ctx->sctrl_base) {
+ DRM_ERROR("failed to get dss sctrl_base resource.\n");
+ return -ENXIO;
+ }
+
+ ctx->pmc_base = of_iomap(np, 3);
+ if (!ctx->pmc_base) {
+ DRM_ERROR("failed to get dss pmc_base resource.\n");
+ return -ENXIO;
+ }
+
+ ctx->noc_dss_base = of_iomap(np, 4);
+ if (!ctx->noc_dss_base) {
+ DRM_ERROR("failed to get noc_dss_base resource.\n");
+ return -ENXIO;
+ }
+
+ /* get irq no */
+ ctx->irq = irq_of_parse_and_map(np, 0);
+ if (ctx->irq <= 0) {
+ DRM_ERROR("failed to get irq_pdp resource.\n");
+ return -ENXIO;
+ }
+
+ DRM_INFO("dss irq = %d\n", ctx->irq);
+
+ ctx->dss_mmbuf_clk = devm_clk_get(dev, "clk_dss_axi_mm");
+ if (IS_ERR(ctx->dss_mmbuf_clk)) {
+ DRM_ERROR("failed to parse dss_mmbuf_clk\n");
+ return PTR_ERR(ctx->dss_mmbuf_clk);
+ }
+
+ ctx->dss_axi_clk = devm_clk_get(dev, "aclk_dss");
+ if (IS_ERR(ctx->dss_axi_clk)) {
+ DRM_ERROR("failed to parse dss_axi_clk\n");
+ return PTR_ERR(ctx->dss_axi_clk);
+ }
+
+ ctx->dss_pclk_dss_clk = devm_clk_get(dev, "pclk_dss");
+ if (IS_ERR(ctx->dss_pclk_dss_clk)) {
+ DRM_ERROR("failed to parse dss_pclk_dss_clk\n");
+ return PTR_ERR(ctx->dss_pclk_dss_clk);
+ }
+
+ ctx->dss_pri_clk = devm_clk_get(dev, "clk_edc0");
+ if (IS_ERR(ctx->dss_pri_clk)) {
+ DRM_ERROR("failed to parse dss_pri_clk\n");
+ return PTR_ERR(ctx->dss_pri_clk);
+ }
+
+ ret = clk_set_rate(ctx->dss_pri_clk, DEFAULT_DSS_CORE_CLK_07V_RATE);
+ if (ret < 0) {
+ DRM_ERROR("dss_pri_clk clk_set_rate(%lu) failed, error=%d!\n",
+ DEFAULT_DSS_CORE_CLK_07V_RATE, ret);
+ return -EINVAL;
+ }
+
+ DRM_INFO("dss_pri_clk:[%lu]->[%llu].\n",
+ DEFAULT_DSS_CORE_CLK_07V_RATE, (uint64_t)clk_get_rate(ctx->dss_pri_clk));
+
+ ctx->dss_pxl0_clk = devm_clk_get(dev, "clk_ldi0");
+ if (IS_ERR(ctx->dss_pxl0_clk)) {
+ DRM_ERROR("failed to parse dss_pxl0_clk\n");
+ return PTR_ERR(ctx->dss_pxl0_clk);
+ }
+
+ ret = clk_set_rate(ctx->dss_pxl0_clk, DSS_MAX_PXL0_CLK_144M);
+ if (ret < 0) {
+ DRM_ERROR("dss_pxl0_clk clk_set_rate(%lu) failed, error=%d!\n",
+ DSS_MAX_PXL0_CLK_144M, ret);
+ return -EINVAL;
+ }
+
+ DRM_INFO("dss_pxl0_clk:[%lu]->[%llu].\n",
+ DSS_MAX_PXL0_CLK_144M, (uint64_t)clk_get_rate(ctx->dss_pxl0_clk));
+
+ /* regulator enable */
+
+ dss_enable_iommu(pdev, ctx);
+
+ return 0;
+}
+
+static int dss_drm_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct dss_data *dss;
+ struct dss_hw_ctx *ctx;
+ struct dss_crtc *acrtc;
+ struct dss_plane *aplane;
+ enum drm_plane_type type;
+ int ret;
+ int i;
+
+ dss = devm_kzalloc(dev->dev, sizeof(*dss), GFP_KERNEL);
+ if (!dss) {
+ DRM_ERROR("failed to alloc dss_data\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, dss);
+
+ ctx = &dss->ctx;
+ acrtc = &dss->acrtc;
+ acrtc->ctx = ctx;
+ acrtc->out_format = LCD_RGB888;
+ acrtc->bgr_fmt = LCD_RGB;
+
+ ret = dss_dts_parse(pdev, ctx);
+ if (ret)
+ return ret;
+
+ ctx->ion_client = NULL;
+ ctx->ion_handle = NULL;
+ ctx->screen_base = NULL;
+ ctx->screen_size = 0;
+ ctx->smem_start = 0;
+
+ ctx->vactive0_end_flag = 0;
+ init_waitqueue_head(&ctx->vactive0_end_wq);
+
+ /*
+ * plane init
+ * TODO: Now only support primary plane, overlay planes
+ * need to do.
+ */
+ for (i = 0; i < DSS_CH_NUM; i++) {
+ aplane = &dss->aplane[i];
+ aplane->ch = i;
+ /*aplane->ctx = ctx;*/
+ aplane->acrtc = acrtc;
+ type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+
+ ret = dss_plane_init(dev, aplane, type);
+ if (ret)
+ return ret;
+ }
+
+ /* crtc init */
+ ret = dss_crtc_init(dev, &acrtc->base, &dss->aplane[PRIMARY_CH].base);
+ if (ret)
+ return ret;
+
+ /* vblank irq init */
+ ret = devm_request_irq(dev->dev, ctx->irq, dss_irq_handler,
+ IRQF_SHARED, dev->driver->name, acrtc);
+ if (ret) {
+ DRM_ERROR("failed to request irq, ret=%d!\n", ret);
+ return ret;
+ }
+
+ disable_irq(ctx->irq);
+
+ dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
+ dev->driver->enable_vblank = dss_enable_vblank;
+ dev->driver->disable_vblank = dss_disable_vblank;
+
+ return 0;
+}
+
+static void dss_drm_cleanup(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct dss_data *dss = platform_get_drvdata(pdev);
+ struct drm_crtc *crtc = &dss->acrtc.base;
+
+ drm_crtc_cleanup(crtc);
+}
+
+const struct kirin_dc_ops dss_dc_ops = {
+ .init = dss_drm_init,
+ .cleanup = dss_drm_cleanup
+};
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_overlay_utils.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_overlay_utils.c
new file mode 100644
index 000000000000..28778b15512a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_drm_overlay_utils.c
@@ -0,0 +1,1241 @@
+/* Copyright (c) 2008-2011, Hisilicon Tech. Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "kirin_drm_dpe_utils.h"
+#include "kirin_drm_drv.h"
+
+
+#define DSS_CHN_MAX_DEFINE (DSS_COPYBIT_MAX)
+static int mid_array[DSS_CHN_MAX_DEFINE] = {0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x2, 0x1, 0x3, 0x0};
+
+/*
+** dss_chn_idx
+** DSS_RCHN_D2 = 0, DSS_RCHN_D3, DSS_RCHN_V0, DSS_RCHN_G0, DSS_RCHN_V1,
+** DSS_RCHN_G1, DSS_RCHN_D0, DSS_RCHN_D1, DSS_WCHN_W0, DSS_WCHN_W1,
+** DSS_RCHN_V2, DSS_WCHN_W2,
+*/
+/*lint -e785*/
+u32 g_dss_module_base[DSS_CHN_MAX_DEFINE][MODULE_CHN_MAX] = {
+ /* D0 */
+ {
+ MIF_CH0_OFFSET,
+ AIF0_CH0_OFFSET,
+ AIF1_CH0_OFFSET,
+ MCTL_CTL_MUTEX_RCH0,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH0_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH0_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH0_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD0_DBG,
+ DSS_RCH_D0_DMA_OFFSET,
+ DSS_RCH_D0_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_D0_CSC_OFFSET,
+ },
+
+ /* D1 */
+ {
+ MIF_CH1_OFFSET,
+ AIF0_CH1_OFFSET,
+ AIF1_CH1_OFFSET,
+ MCTL_CTL_MUTEX_RCH1,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH1_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH1_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH1_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD1_DBG,
+ DSS_RCH_D1_DMA_OFFSET,
+ DSS_RCH_D1_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_D1_CSC_OFFSET,
+ },
+
+ /* V0 */
+ {
+ MIF_CH2_OFFSET,
+ AIF0_CH2_OFFSET,
+ AIF1_CH2_OFFSET,
+ MCTL_CTL_MUTEX_RCH2,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH2_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH2_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH2_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD2_DBG,
+ DSS_RCH_VG0_DMA_OFFSET,
+ DSS_RCH_VG0_DFC_OFFSET,
+ DSS_RCH_VG0_SCL_OFFSET,
+ DSS_RCH_VG0_SCL_LUT_OFFSET,
+ DSS_RCH_VG0_ARSR_OFFSET,
+ DSS_RCH_VG0_ARSR_LUT_OFFSET,
+ DSS_RCH_VG0_POST_CLIP_OFFSET,
+ DSS_RCH_VG0_PCSC_OFFSET,
+ DSS_RCH_VG0_CSC_OFFSET,
+ },
+
+ /* G0 */
+ {
+ MIF_CH3_OFFSET,
+ AIF0_CH3_OFFSET,
+ AIF1_CH3_OFFSET,
+ MCTL_CTL_MUTEX_RCH3,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH3_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH3_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH3_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD3_DBG,
+ DSS_RCH_G0_DMA_OFFSET,
+ DSS_RCH_G0_DFC_OFFSET,
+ DSS_RCH_G0_SCL_OFFSET,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_G0_POST_CLIP_OFFSET,
+ 0,
+ DSS_RCH_G0_CSC_OFFSET,
+ },
+
+ /* V1 */
+ {
+ MIF_CH4_OFFSET,
+ AIF0_CH4_OFFSET,
+ AIF1_CH4_OFFSET,
+ MCTL_CTL_MUTEX_RCH4,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH4_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH4_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH4_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD4_DBG,
+ DSS_RCH_VG1_DMA_OFFSET,
+ DSS_RCH_VG1_DFC_OFFSET,
+ DSS_RCH_VG1_SCL_OFFSET,
+ DSS_RCH_VG1_SCL_LUT_OFFSET,
+ 0,
+ 0,
+ DSS_RCH_VG1_POST_CLIP_OFFSET,
+ 0,
+ DSS_RCH_VG1_CSC_OFFSET,
+ },
+
+ /* G1 */
+ {
+ MIF_CH5_OFFSET,
+ AIF0_CH5_OFFSET,
+ AIF1_CH5_OFFSET,
+ MCTL_CTL_MUTEX_RCH5,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH5_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH5_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH5_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD5_DBG,
+ DSS_RCH_G1_DMA_OFFSET,
+ DSS_RCH_G1_DFC_OFFSET,
+ DSS_RCH_G1_SCL_OFFSET,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_G1_POST_CLIP_OFFSET,
+ 0,
+ DSS_RCH_G1_CSC_OFFSET,
+ },
+
+ /* D2 */
+ {
+ MIF_CH6_OFFSET,
+ AIF0_CH6_OFFSET,
+ AIF1_CH6_OFFSET,
+ MCTL_CTL_MUTEX_RCH6,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH6_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH6_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH6_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD6_DBG,
+ DSS_RCH_D2_DMA_OFFSET,
+ DSS_RCH_D2_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_D2_CSC_OFFSET,
+ },
+
+ /* D3 */
+ {
+ MIF_CH7_OFFSET,
+ AIF0_CH7_OFFSET,
+ AIF1_CH7_OFFSET,
+ MCTL_CTL_MUTEX_RCH7,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH7_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH7_OV_OEN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH7_STARTY,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD7_DBG,
+ DSS_RCH_D3_DMA_OFFSET,
+ DSS_RCH_D3_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_RCH_D3_CSC_OFFSET,
+ },
+
+ /* W0 */
+ {
+ MIF_CH8_OFFSET,
+ AIF0_CH8_OFFSET,
+ AIF1_CH8_OFFSET,
+ MCTL_CTL_MUTEX_WCH0,
+ DSS_MCTRL_SYS_OFFSET + MCTL_WCH0_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_WCH0_OV_IEN,
+ 0,
+ 0,
+ DSS_WCH0_DMA_OFFSET,
+ DSS_WCH0_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_WCH0_CSC_OFFSET,
+ },
+
+ /* W1 */
+ {
+ MIF_CH9_OFFSET,
+ AIF0_CH9_OFFSET,
+ AIF1_CH9_OFFSET,
+ MCTL_CTL_MUTEX_WCH1,
+ DSS_MCTRL_SYS_OFFSET + MCTL_WCH1_FLUSH_EN,
+ DSS_MCTRL_SYS_OFFSET + MCTL_WCH1_OV_IEN,
+ 0,
+ 0,
+ DSS_WCH1_DMA_OFFSET,
+ DSS_WCH1_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_WCH1_CSC_OFFSET,
+ },
+ /* V2 */
+ {
+ MIF_CH10_OFFSET,
+ AIF0_CH11_OFFSET,
+ AIF1_CH11_OFFSET,
+ MCTL_CTL_MUTEX_RCH8,
+ DSS_MCTRL_SYS_OFFSET + MCTL_RCH8_FLUSH_EN,
+ 0,
+ 0,
+ DSS_MCTRL_SYS_OFFSET + MCTL_MOD8_DBG,
+ DSS_RCH_VG2_DMA_OFFSET,
+ DSS_RCH_VG2_DFC_OFFSET,
+ DSS_RCH_VG2_SCL_OFFSET,
+ DSS_RCH_VG2_SCL_LUT_OFFSET,
+ 0,
+ 0,
+ DSS_RCH_VG2_POST_CLIP_OFFSET,
+ 0,
+ DSS_RCH_VG2_CSC_OFFSET,
+ },
+ /* W2 */
+ {
+ MIF_CH11_OFFSET,
+ AIF0_CH12_OFFSET,
+ AIF1_CH12_OFFSET,
+ MCTL_CTL_MUTEX_WCH2,
+ DSS_MCTRL_SYS_OFFSET + MCTL_WCH2_FLUSH_EN,
+ 0,
+ 0,
+ 0,
+ DSS_WCH2_DMA_OFFSET,
+ DSS_WCH2_DFC_OFFSET,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ DSS_WCH2_CSC_OFFSET,
+ },
+};
+
+/*lint +e785*/
+u32 g_dss_module_ovl_base[DSS_MCTL_IDX_MAX][MODULE_OVL_MAX] = {
+ {DSS_OVL0_OFFSET,
+ DSS_MCTRL_CTL0_OFFSET},
+
+ {DSS_OVL1_OFFSET,
+ DSS_MCTRL_CTL1_OFFSET},
+
+ {DSS_OVL2_OFFSET,
+ DSS_MCTRL_CTL2_OFFSET},
+
+ {DSS_OVL3_OFFSET,
+ DSS_MCTRL_CTL3_OFFSET},
+
+ {0,
+ DSS_MCTRL_CTL4_OFFSET},
+
+ {0,
+ DSS_MCTRL_CTL5_OFFSET},
+};
+
+/*SCF_LUT_CHN coef_idx*/
+int g_scf_lut_chn_coef_idx[DSS_CHN_MAX_DEFINE] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+u32 g_dss_module_cap[DSS_CHN_MAX_DEFINE][MODULE_CAP_MAX] = {
+ /* D2 */
+ {0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1},
+ /* D3 */
+ {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1},
+ /* V0 */
+ {0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1},
+ /* G0 */
+ {0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0},
+ /* V1 */
+ {0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1},
+ /* G1 */
+ {0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0},
+ /* D0 */
+ {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1},
+ /* D1 */
+ {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1},
+
+ /* W0 */
+ {1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1},
+ /* W1 */
+ {1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1},
+
+ /* V2 */
+ {0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1},
+ /* W2 */
+ {1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1},
+};
+
+/* number of smrx idx for each channel */
+u32 g_dss_chn_sid_num[DSS_CHN_MAX_DEFINE] = {
+ 4, 1, 4, 4, 4, 4, 1, 1, 3, 3, 3, 2
+};
+
+/* start idx of each channel */
+/* smrx_idx = g_dss_smmu_smrx_idx[chn_idx] + (0 ~ g_dss_chn_sid_num[chn_idx]) */
+u32 g_dss_smmu_smrx_idx[DSS_CHN_MAX_DEFINE] = {
+ 0, 4, 5, 9, 13, 17, 21, 22, 26, 29, 23, 32
+};
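+/* Example (per the dss_chn_idx comment above): the channel at index 2,
+ * DSS_RCHN_V0, starts at SMRx index 5 and owns 4 entries, i.e. SMRx 5..8. */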
+u32 g_dss_mif_sid_map[DSS_CHN_MAX] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
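+/* Map HAL pixel formats to the RDMA hardware format codes. RGB/BGR variants
+ * collapse to the same DMA code; the byte-order distinction is presumably
+ * handled elsewhere (e.g. by the DFC stage). */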
+static int hisi_pixel_format_hal2dma(int format)
+{
+ int ret = 0;
+
+ switch (format) {
+ case HISI_FB_PIXEL_FORMAT_RGB_565:
+ case HISI_FB_PIXEL_FORMAT_BGR_565:
+ ret = DMA_PIXEL_FORMAT_RGB_565;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBX_4444:
+ case HISI_FB_PIXEL_FORMAT_BGRX_4444:
+ ret = DMA_PIXEL_FORMAT_XRGB_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_4444:
+ case HISI_FB_PIXEL_FORMAT_BGRA_4444:
+ ret = DMA_PIXEL_FORMAT_ARGB_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBX_5551:
+ case HISI_FB_PIXEL_FORMAT_BGRX_5551:
+ ret = DMA_PIXEL_FORMAT_XRGB_5551;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_5551:
+ case HISI_FB_PIXEL_FORMAT_BGRA_5551:
+ ret = DMA_PIXEL_FORMAT_ARGB_5551;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_RGBX_8888:
+ case HISI_FB_PIXEL_FORMAT_BGRX_8888:
+ ret = DMA_PIXEL_FORMAT_XRGB_8888;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_8888:
+ case HISI_FB_PIXEL_FORMAT_BGRA_8888:
+ ret = DMA_PIXEL_FORMAT_ARGB_8888;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YUV_422_I:
+ case HISI_FB_PIXEL_FORMAT_YUYV_422_Pkg:
+ case HISI_FB_PIXEL_FORMAT_YVYU_422_Pkg:
+ case HISI_FB_PIXEL_FORMAT_UYVY_422_Pkg:
+ case HISI_FB_PIXEL_FORMAT_VYUY_422_Pkg:
+ ret = DMA_PIXEL_FORMAT_YUYV_422_Pkg;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YCbCr_422_P:
+ case HISI_FB_PIXEL_FORMAT_YCrCb_422_P:
+ ret = DMA_PIXEL_FORMAT_YUV_422_P_HP;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCbCr_420_P:
+ case HISI_FB_PIXEL_FORMAT_YCrCb_420_P:
+ ret = DMA_PIXEL_FORMAT_YUV_420_P_HP;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YCbCr_422_SP:
+ case HISI_FB_PIXEL_FORMAT_YCrCb_422_SP:
+ ret = DMA_PIXEL_FORMAT_YUV_422_SP_HP;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCbCr_420_SP:
+ case HISI_FB_PIXEL_FORMAT_YCrCb_420_SP:
+ ret = DMA_PIXEL_FORMAT_YUV_420_SP_HP;
+ break;
+
+ default:
+ DRM_ERROR("not support format(%d)!\n", format);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static int hisi_pixel_format_hal2dfc(int format)
+{
+ int ret = 0;
+
+ switch (format) {
+ case HISI_FB_PIXEL_FORMAT_RGB_565:
+ ret = DFC_PIXEL_FORMAT_RGB_565;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBX_4444:
+ ret = DFC_PIXEL_FORMAT_XBGR_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_4444:
+ ret = DFC_PIXEL_FORMAT_ABGR_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBX_5551:
+ ret = DFC_PIXEL_FORMAT_XBGR_5551;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_5551:
+ ret = DFC_PIXEL_FORMAT_ABGR_5551;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBX_8888:
+ ret = DFC_PIXEL_FORMAT_XBGR_8888;
+ break;
+ case HISI_FB_PIXEL_FORMAT_RGBA_8888:
+ ret = DFC_PIXEL_FORMAT_ABGR_8888;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_BGR_565:
+ ret = DFC_PIXEL_FORMAT_BGR_565;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRX_4444:
+ ret = DFC_PIXEL_FORMAT_XRGB_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRA_4444:
+ ret = DFC_PIXEL_FORMAT_ARGB_4444;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRX_5551:
+ ret = DFC_PIXEL_FORMAT_XRGB_5551;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRA_5551:
+ ret = DFC_PIXEL_FORMAT_ARGB_5551;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRX_8888:
+ ret = DFC_PIXEL_FORMAT_XRGB_8888;
+ break;
+ case HISI_FB_PIXEL_FORMAT_BGRA_8888:
+ ret = DFC_PIXEL_FORMAT_ARGB_8888;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YUV_422_I:
+ case HISI_FB_PIXEL_FORMAT_YUYV_422_Pkg:
+ ret = DFC_PIXEL_FORMAT_YUYV422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YVYU_422_Pkg:
+ ret = DFC_PIXEL_FORMAT_YVYU422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_UYVY_422_Pkg:
+ ret = DFC_PIXEL_FORMAT_UYVY422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_VYUY_422_Pkg:
+ ret = DFC_PIXEL_FORMAT_VYUY422;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YCbCr_422_SP:
+ ret = DFC_PIXEL_FORMAT_YUYV422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCrCb_422_SP:
+ ret = DFC_PIXEL_FORMAT_YVYU422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCbCr_420_SP:
+ ret = DFC_PIXEL_FORMAT_YUYV422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCrCb_420_SP:
+ ret = DFC_PIXEL_FORMAT_YVYU422;
+ break;
+
+ case HISI_FB_PIXEL_FORMAT_YCbCr_422_P:
+ case HISI_FB_PIXEL_FORMAT_YCbCr_420_P:
+ ret = DFC_PIXEL_FORMAT_YUYV422;
+ break;
+ case HISI_FB_PIXEL_FORMAT_YCrCb_422_P:
+ case HISI_FB_PIXEL_FORMAT_YCrCb_420_P:
+ ret = DFC_PIXEL_FORMAT_YVYU422;
+ break;
+
+ default:
+ DRM_ERROR("not support format(%d)!\n", format);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static int hisi_dss_aif_ch_config(struct dss_hw_ctx *ctx, int chn_idx)
+{
+ void __iomem *aif0_ch_base;
+ int mid = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mid = mid_array[chn_idx];
+ aif0_ch_base = ctx->base + g_dss_module_base[chn_idx][MODULE_AIF0_CHN];
+
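+ /* Assumption: set_reg(addr, val, bw, bs) programs a bw-bit-wide field at
+ * bit offset bs. Here bit 0 of the AIF0 channel register is cleared and
+ * the channel's MID is written into bits [7:4]. */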
+ set_reg(aif0_ch_base, 0x0, 1, 0);
+ set_reg(aif0_ch_base, (uint32_t)mid, 4, 4);
+
+ return 0;
+}
+
+static int hisi_dss_smmu_config(struct dss_hw_ctx *ctx, int chn_idx, bool mmu_enable)
+{
+ void __iomem *smmu_base;
+ u32 idx = 0, i = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ smmu_base = ctx->base + DSS_SMMU_OFFSET;
+
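+ /* Program one SMRx_NS entry per stream ID owned by this channel (counts
+ * and start indices come from g_dss_chn_sid_num / g_dss_smmu_smrx_idx).
+ * Writing 1 is assumed to bypass the SMMU for that stream; 0x70 is
+ * assumed to enable translation with the default attribute bits. */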
+ for (i = 0; i < g_dss_chn_sid_num[chn_idx]; i++) {
+ idx = g_dss_smmu_smrx_idx[chn_idx] + i;
+ if (!mmu_enable)
+ set_reg(smmu_base + SMMU_SMRx_NS + idx * 0x4, 1, 32, 0);
+ else
+ set_reg(smmu_base + SMMU_SMRx_NS + idx * 0x4, 0x70, 32, 0);
+ }
+
+ return 0;
+}
+
+static int hisi_dss_mif_config(struct dss_hw_ctx *ctx, int chn_idx, bool mmu_enable)
+{
+ void __iomem *mif_base;
+ void __iomem *mif_ch_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mif_base = ctx->base + DSS_MIF_OFFSET;
+ mif_ch_base = ctx->base +
+ g_dss_module_base[chn_idx][MODULE_MIF_CHN];
+
+ if (!mmu_enable) {
+ set_reg(mif_ch_base + MIF_CTRL1, 0x1, 1, 5);
+ } else {
+ set_reg(mif_ch_base + MIF_CTRL1, 0x00080000, 32, 0);
+ }
+
+ return 0;
+}
+
+int hisi_dss_mctl_mutex_lock(struct dss_hw_ctx *ctx)
+{
+ void __iomem *mctl_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_MCTL_BASE];
+
+ set_reg(mctl_base + MCTL_CTL_MUTEX, 0x1, 1, 0);
+
+ return 0;
+}
+
+int hisi_dss_mctl_mutex_unlock(struct dss_hw_ctx *ctx)
+{
+ void __iomem *mctl_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_MCTL_BASE];
+
+ set_reg(mctl_base + MCTL_CTL_MUTEX, 0x0, 1, 0);
+
+ return 0;
+}
+
+static int hisi_dss_mctl_ov_config(struct dss_hw_ctx *ctx, int chn_idx)
+{
+ void __iomem *mctl_base;
+ u32 mctl_rch_offset = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mctl_rch_offset = (uint32_t)(MCTL_CTL_MUTEX_RCH0 + chn_idx * 0x4);
+
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_MCTL_BASE];
+
+ set_reg(mctl_base + MCTL_CTL_EN, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_TOP, 0x2, 32, 0); /*auto mode*/
+ set_reg(mctl_base + MCTL_CTL_DBG, 0xB13A00, 32, 0);
+
+ set_reg(mctl_base + mctl_rch_offset, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_ITF, 0x1, 2, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_DBUF, 0x1, 2, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_OV, 1 << DSS_OVL0, 4, 0);
+
+ return 0;
+}
+
+static int hisi_dss_mctl_sys_config(struct dss_hw_ctx *ctx, int chn_idx)
+{
+ void __iomem *mctl_sys_base;
+
+ u32 layer_idx = 0;
+ u32 mctl_rch_ov_oen_offset = 0;
+ u32 mctl_rch_flush_en_offset = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mctl_sys_base = ctx->base + DSS_MCTRL_SYS_OFFSET;
+ mctl_rch_ov_oen_offset = MCTL_RCH0_OV_OEN + chn_idx * 0x4;
+ mctl_rch_flush_en_offset = MCTL_RCH0_FLUSH_EN + chn_idx * 0x4;
+
+ set_reg(mctl_sys_base + mctl_rch_ov_oen_offset,
+ ((1 << (layer_idx + 1)) | (0x100 << DSS_OVL0)), 32, 0);
+
+ set_reg(mctl_sys_base + MCTL_RCH_OV0_SEL, 0x8, 4, 0);
+
+ set_reg(mctl_sys_base + MCTL_RCH_OV0_SEL, chn_idx, 4, (layer_idx + 1) * 4);
+
+ set_reg(mctl_sys_base + MCTL_OV0_FLUSH_EN, 0xd, 4, 0);
+ set_reg(mctl_sys_base + mctl_rch_flush_en_offset, 0x1, 32, 0);
+
+ return 0;
+}
+
+static int hisi_dss_rdma_config(struct dss_hw_ctx *ctx,
+ const dss_rect_ltrb_t *rect, u32 display_addr, u32 hal_format,
+ u32 bpp, int chn_idx, bool afbcd, bool mmu_enable)
+{
+ void __iomem *rdma_base;
+
+ u32 aligned_pixel = 0;
+ u32 rdma_oft_x0 = 0;
+ u32 rdma_oft_y0 = 0;
+ u32 rdma_oft_x1 = 0;
+ u32 rdma_oft_y1 = 0;
+ u32 rdma_stride = 0;
+ u32 rdma_bpp = 0;
+ int rdma_format = 0; /* signed: hisi_pixel_format_hal2dma() returns -1 on error */
+ u32 stretch_size_vrt = 0;
+
+ u32 stride_align = 0;
+ u32 mm_base_0 = 0;
+ u32 mm_base_1 = 0;
+
+ u32 afbcd_header_addr = 0;
+ u32 afbcd_header_stride = 0;
+ u32 afbcd_payload_addr = 0;
+ u32 afbcd_payload_stride = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ if (bpp == 4)
+ rdma_bpp = 0x5;
+ else
+ rdma_bpp = 0x0;
+
+ rdma_base = ctx->base +
+ g_dss_module_base[chn_idx][MODULE_DMA];
+
+ aligned_pixel = DMA_ALIGN_BYTES / bpp;
+ rdma_oft_x0 = rect->left / aligned_pixel;
+ rdma_oft_y0 = rect->top;
+ rdma_oft_x1 = rect->right / aligned_pixel;
+ rdma_oft_y1 = rect->bottom;
+
+ rdma_format = hisi_pixel_format_hal2dma(hal_format);
+ if (rdma_format < 0) {
+ DRM_ERROR("layer format(%d) not support !\n", hal_format);
+ return -EINVAL;
+ }
+
+ if (afbcd) {
+ mm_base_0 = 0;
+ mm_base_1 = mm_base_0 + rect->right * bpp * MMBUF_LINE_NUM;
+ mm_base_0 = ALIGN_UP(mm_base_0, MMBUF_ADDR_ALIGN);
+ mm_base_1 = ALIGN_UP(mm_base_1, MMBUF_ADDR_ALIGN);
+
+ if ((((rect->right - rect->left) + 1) & (AFBC_HEADER_ADDR_ALIGN - 1)) ||
+ (((rect->bottom - rect->top) + 1) & (AFBC_BLOCK_ALIGN - 1))) {
+ DRM_ERROR("img width(%d) is not %d bytes aligned, or "
+ "img heigh(%d) is not %d bytes aligned!\n",
+ ((rect->right - rect->left) + 1), AFBC_HEADER_ADDR_ALIGN,
+ ((rect->bottom - rect->top) + 1), AFBC_BLOCK_ALIGN);
+ }
+
+ if ((mm_base_0 & (MMBUF_ADDR_ALIGN - 1)) || (mm_base_1 & (MMBUF_ADDR_ALIGN - 1))) {
+ DRM_ERROR("mm_base_0(0x%x) is not %d bytes aligned, or "
+ "mm_base_1(0x%x) is not %d bytes aligned!\n",
+ mm_base_0, MMBUF_ADDR_ALIGN,
+ mm_base_1, MMBUF_ADDR_ALIGN);
+ }
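+ /* AFBC layout implied by the calculations below: the buffer starts with
+ * a header table (apparently 16 bytes per 16x16 superblock, padded to
+ * 1KB), followed by the payload; header and payload strides are
+ * expressed per superblock row. */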
+ /*header*/
+ afbcd_header_stride = (((rect->right - rect->left) + 1) / AFBC_BLOCK_ALIGN) * AFBC_HEADER_STRIDE_BLOCK;
+ afbcd_header_addr = (uint32_t)(unsigned long)display_addr;
+
+ /*payload*/
+ if (bpp == 4)
+ stride_align = AFBC_PAYLOAD_STRIDE_ALIGN_32;
+ else if (bpp == 2)
+ stride_align = AFBC_PAYLOAD_STRIDE_ALIGN_16;
+ else
+ DRM_ERROR("bpp(%d) not supported!\n", bpp);
+
+ afbcd_payload_stride = (((rect->right - rect->left) + 1) / AFBC_BLOCK_ALIGN) * stride_align;
+
+ afbcd_payload_addr = afbcd_header_addr + ALIGN_UP(16 * (((rect->right - rect->left) + 1) / 16) *
+ (((rect->bottom - rect->top) + 1) / 16), 1024);
+ afbcd_payload_addr = afbcd_payload_addr +
+ (rect->top / AFBC_BLOCK_ALIGN) * afbcd_payload_stride +
+ (rect->left / AFBC_BLOCK_ALIGN) * stride_align;
+
+ set_reg(rdma_base + CH_REG_DEFAULT, 0x1, 32, 0);
+ set_reg(rdma_base + CH_REG_DEFAULT, 0x0, 32, 0);
+ set_reg(rdma_base + DMA_OFT_X0, rdma_oft_x0, 12, 0);
+ set_reg(rdma_base + DMA_OFT_Y0, rdma_oft_y0, 16, 0);
+ set_reg(rdma_base + DMA_OFT_X1, rdma_oft_x1, 12, 0);
+ set_reg(rdma_base + DMA_OFT_Y1, rdma_oft_y1, 16, 0);
+ set_reg(rdma_base + DMA_STRETCH_SIZE_VRT, (rect->bottom - rect->top), 13, 0);
+ set_reg(rdma_base + DMA_CTRL, rdma_format, 5, 3);
+ set_reg(rdma_base + DMA_CTRL, (mmu_enable ? 0x1 : 0x0), 1, 8);
+
+ set_reg(rdma_base + AFBCD_HREG_PIC_WIDTH, (rect->right - rect->left), 16, 0);
+ set_reg(rdma_base + AFBCD_HREG_PIC_HEIGHT, (rect->bottom - rect->top), 16, 0);
+ set_reg(rdma_base + AFBCD_CTL, AFBC_HALF_BLOCK_UPPER_LOWER_ALL, 2, 6);
+ set_reg(rdma_base + AFBCD_HREG_HDR_PTR_LO, afbcd_header_addr, 32, 0);
+ set_reg(rdma_base + AFBCD_INPUT_HEADER_STRIDE, afbcd_header_stride, 14, 0);
+ set_reg(rdma_base + AFBCD_PAYLOAD_STRIDE, afbcd_payload_stride, 20, 0);
+ set_reg(rdma_base + AFBCD_MM_BASE_0, mm_base_0, 32, 0);
+ set_reg(rdma_base + AFBCD_HREG_FORMAT, 0x1, 1, 21);
+ set_reg(rdma_base + AFBCD_SCRAMBLE_MODE, 0x0, 32, 0);
+ set_reg(rdma_base + AFBCD_AFBCD_PAYLOAD_POINTER, afbcd_payload_addr, 32, 0);
+ set_reg(rdma_base + AFBCD_HEIGHT_BF_STR, (rect->bottom - rect->top), 16, 0);
+
+ set_reg(rdma_base + CH_CTL, 0xf005, 32, 0);
+ } else {
+ stretch_size_vrt = rdma_oft_y1 - rdma_oft_y0;
+ rdma_stride = ((rect->right - rect->left) + 1) * bpp / DMA_ALIGN_BYTES;
+
+ set_reg(rdma_base + CH_REG_DEFAULT, 0x1, 32, 0);
+ set_reg(rdma_base + CH_REG_DEFAULT, 0x0, 32, 0);
+
+ set_reg(rdma_base + DMA_OFT_X0, rdma_oft_x0, 12, 0);
+ set_reg(rdma_base + DMA_OFT_Y0, rdma_oft_y0, 16, 0);
+ set_reg(rdma_base + DMA_OFT_X1, rdma_oft_x1, 12, 0);
+ set_reg(rdma_base + DMA_OFT_Y1, rdma_oft_y1, 16, 0);
+ set_reg(rdma_base + DMA_CTRL, rdma_format, 5, 3);
+ set_reg(rdma_base + DMA_CTRL, (mmu_enable ? 0x1 : 0x0), 1, 8);
+ set_reg(rdma_base + DMA_STRETCH_SIZE_VRT, stretch_size_vrt, 32, 0);
+ set_reg(rdma_base + DMA_DATA_ADDR0, display_addr, 32, 0);
+ set_reg(rdma_base + DMA_STRIDE0, rdma_stride, 13, 0);
+
+ set_reg(rdma_base + CH_CTL, 0x1, 1, 0);
+ }
+
+ return 0;
+}
+
+static int hisi_dss_rdfc_config(struct dss_hw_ctx *ctx,
+ const dss_rect_ltrb_t *rect, u32 hal_format, u32 bpp, int chn_idx)
+{
+ void __iomem *rdfc_base;
+
+ u32 dfc_pix_in_num = 0;
+ u32 size_hrz = 0;
+ u32 size_vrt = 0;
+ int dfc_fmt = 0; /* signed: hisi_pixel_format_hal2dfc() returns -1 on error */
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ rdfc_base = ctx->base +
+ g_dss_module_base[chn_idx][MODULE_DFC];
+
+ dfc_pix_in_num = (bpp <= 2) ? 0x1 : 0x0;
+ size_hrz = rect->right - rect->left;
+ size_vrt = rect->bottom - rect->top;
+
+ dfc_fmt = hisi_pixel_format_hal2dfc(hal_format);
+ if (dfc_fmt < 0) {
+ DRM_ERROR("layer format (%d) not support !\n", hal_format);
+ return -EINVAL;
+ }
+
+ set_reg(rdfc_base + DFC_DISP_SIZE, (size_vrt | (size_hrz << 16)), 29, 0);
+ set_reg(rdfc_base + DFC_PIX_IN_NUM, dfc_pix_in_num, 1, 0);
+ //set_reg(rdfc_base + DFC_DISP_FMT, (bpp <= 2) ? 0x0 : 0x6, 5, 1);
+ set_reg(rdfc_base + DFC_DISP_FMT, dfc_fmt, 5, 1);
+ set_reg(rdfc_base + DFC_CTL_CLIP_EN, 0x1, 1, 0);
+ set_reg(rdfc_base + DFC_ICG_MODULE, 0x1, 1, 0);
+
+ return 0;
+}
+
+int hisi_dss_ovl_base_config(struct dss_hw_ctx *ctx, u32 xres, u32 yres)
+{
+ void __iomem *mctl_sys_base;
+ void __iomem *mctl_base;
+ void __iomem *ovl0_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ mctl_sys_base = ctx->base + DSS_MCTRL_SYS_OFFSET;
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_MCTL_BASE];
+ ovl0_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_OVL_BASE];
+
+ set_reg(ovl0_base + OVL6_REG_DEFAULT, 0x1, 32, 0);
+ set_reg(ovl0_base + OVL6_REG_DEFAULT, 0x0, 32, 0);
+
+ set_reg(ovl0_base + OVL_SIZE, (xres - 1) | ((yres - 1) << 16), 32, 0);
+#ifdef CONFIG_HISI_FB_OV_BASE_USED
+ set_reg(ovl0_base + OVL_BG_COLOR, 0xFFFF0000, 32, 0);
+#else
+ set_reg(ovl0_base + OVL_BG_COLOR, 0xFF000000, 32, 0);
+#endif
+ set_reg(ovl0_base + OVL_DST_STARTPOS, 0x0, 32, 0);
+ set_reg(ovl0_base + OVL_DST_ENDPOS, (xres - 1) | ((yres - 1) << 16), 32, 0);
+ set_reg(ovl0_base + OVL_GCFG, 0x10001, 32, 0);
+
+ set_reg(mctl_base + MCTL_CTL_MUTEX_ITF, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_DBUF, 0x1, 2, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_OV, 1 << DSS_OVL0, 4, 0);
+
+ set_reg(mctl_sys_base + MCTL_RCH_OV0_SEL, 0x8, 4, 0);
+ set_reg(mctl_sys_base + MCTL_OV0_FLUSH_EN, 0xd, 4, 0);
+
+ return 0;
+}
+
+static int hisi_dss_ovl_config(struct dss_hw_ctx *ctx,
+ const dss_rect_ltrb_t *rect, u32 xres, u32 yres)
+{
+ void __iomem *ovl0_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return -1;
+ }
+
+ ovl0_base = ctx->base +
+ g_dss_module_ovl_base[DSS_OVL0][MODULE_OVL_BASE];
+
+ set_reg(ovl0_base + OVL6_REG_DEFAULT, 0x1, 32, 0);
+ set_reg(ovl0_base + OVL6_REG_DEFAULT, 0x0, 32, 0);
+ set_reg(ovl0_base + OVL_SIZE, (xres - 1) |
+ ((yres - 1) << 16), 32, 0);
+ set_reg(ovl0_base + OVL_BG_COLOR, 0xFF000000, 32, 0);
+ set_reg(ovl0_base + OVL_DST_STARTPOS, 0x0, 32, 0);
+ set_reg(ovl0_base + OVL_DST_ENDPOS, (xres - 1) |
+ ((yres - 1) << 16), 32, 0);
+ set_reg(ovl0_base + OVL_GCFG, 0x10001, 32, 0);
+ set_reg(ovl0_base + OVL_LAYER0_POS, (rect->left) |
+ ((rect->top) << 16), 32, 0);
+ set_reg(ovl0_base + OVL_LAYER0_SIZE, (rect->right) |
+ ((rect->bottom) << 16), 32, 0);
+ set_reg(ovl0_base + OVL_LAYER0_ALPHA, 0x00ff40ff, 32, 0);
+ set_reg(ovl0_base + OVL_LAYER0_CFG, 0x1, 1, 0);
+
+ return 0;
+}
+
+static void hisi_dss_qos_on(struct dss_hw_ctx *ctx)
+{
+ char __iomem *noc_dss_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ noc_dss_base = ctx->noc_dss_base;
+
+ outp32(noc_dss_base + 0xc, 0x2);
+ outp32(noc_dss_base + 0x8c, 0x2);
+ outp32(noc_dss_base + 0x10c, 0x2);
+ outp32(noc_dss_base + 0x18c, 0x2);
+}
+
+static void hisi_dss_mif_on(struct dss_hw_ctx *ctx)
+{
+ char __iomem *dss_base;
+ char __iomem *mif_base;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+ mif_base = ctx->base + DSS_MIF_OFFSET;
+
+ set_reg(mif_base + MIF_ENABLE, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH0_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH1_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH2_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH3_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH4_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH5_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH6_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH7_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH8_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH9_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+
+ set_reg(dss_base + MIF_CH10_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+ set_reg(dss_base + MIF_CH11_OFFSET + MIF_CTRL0, 0x1, 1, 0);
+}
+
+void hisi_dss_smmu_on(struct dss_hw_ctx *ctx)
+{
+ void __iomem *smmu_base;
+ struct iommu_domain_data *domain_data = NULL;
+ uint32_t phy_pgd_base = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ smmu_base = ctx->base + DSS_SMMU_OFFSET;
+
+ set_reg(smmu_base + SMMU_SCR, 0x0, 1, 0); /*global bypass cancel*/
+ set_reg(smmu_base + SMMU_SCR, 0x1, 8, 20); /*ptw_mid*/
+ set_reg(smmu_base + SMMU_SCR, 0xf, 4, 16); /*pwt_pf*/
+ set_reg(smmu_base + SMMU_SCR, 0x7, 3, 3); /*interrupt cachel1 cach3l2 en*/
+ set_reg(smmu_base + SMMU_LP_CTRL, 0x1, 1, 0); /*auto_clk_gt_en*/
+
+ /*Long Descriptor*/
+ set_reg(smmu_base + SMMU_CB_TTBCR, 0x1, 1, 0);
+
+ set_reg(smmu_base + SMMU_ERR_RDADDR, 0x7FF00000, 32, 0);
+ set_reg(smmu_base + SMMU_ERR_WRADDR, 0x7FFF0000, 32, 0);
+
+ /*disable cmdlist, dbg, reload*/
+ set_reg(smmu_base + SMMU_RLD_EN0_NS, DSS_SMMU_RLD_EN0_DEFAULT_VAL, 32, 0);
+ set_reg(smmu_base + SMMU_RLD_EN1_NS, DSS_SMMU_RLD_EN1_DEFAULT_VAL, 32, 0);
+
+ /*cmdlist stream bypass*/
+ set_reg(smmu_base + SMMU_SMRx_NS + 36 * 0x4, 0x1, 32, 0); /*debug stream id*/
+ set_reg(smmu_base + SMMU_SMRx_NS + 37 * 0x4, 0x1, 32, 0); /*cmd unsec stream id*/
+ set_reg(smmu_base + SMMU_SMRx_NS + 38 * 0x4, 0x1, 32, 0); /*cmd sec stream id*/
+
+ /*TTBR0*/
+ domain_data = (struct iommu_domain_data *)(ctx->mmu_domain->priv);
+ phy_pgd_base = (uint32_t)(domain_data->phy_pgd_base);
+ set_reg(smmu_base + SMMU_CB_TTBR0, phy_pgd_base, 32, 0);
+}
+
+void hisifb_dss_on(struct dss_hw_ctx *ctx)
+{
+ /* dss qos on*/
+ hisi_dss_qos_on(ctx);
+ /* mif on*/
+ hisi_dss_mif_on(ctx);
+ /* smmu on*/
+ hisi_dss_smmu_on(ctx);
+}
+
+void hisi_dss_mctl_on(struct dss_hw_ctx *ctx)
+{
+ char __iomem *mctl_base = NULL;
+ char __iomem *mctl_sys_base = NULL;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+ mctl_base = ctx->base +
+ g_dss_module_ovl_base[DSS_MCTL0][MODULE_MCTL_BASE];
+ mctl_sys_base = ctx->base + DSS_MCTRL_SYS_OFFSET;
+
+ set_reg(mctl_base + MCTL_CTL_EN, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_MUTEX_ITF, 0x1, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_DBG, 0xB13A00, 32, 0);
+ set_reg(mctl_base + MCTL_CTL_TOP, 0x2, 32, 0);
+}
+
+void hisi_dss_unflow_handler(struct dss_hw_ctx *ctx, bool unmask)
+{
+ void __iomem *dss_base;
+ u32 tmp = 0;
+
+ if (!ctx) {
+ DRM_ERROR("ctx is NULL!\n");
+ return;
+ }
+
+ dss_base = ctx->base;
+
+ tmp = inp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK);
+ if (unmask)
+ tmp &= ~BIT_LDI_UNFLOW;
+ else
+ tmp |= BIT_LDI_UNFLOW;
+
+ outp32(dss_base + DSS_LDI0_OFFSET + LDI_CPU_ITF_INT_MSK, tmp);
+}
+
+static int hisi_dss_wait_for_complete(struct dss_hw_ctx *ctx)
+{
+ int ret = 0;
+ u32 times = 0;
+ u32 prev_vactive0_end = 0;
+
+ prev_vactive0_end = ctx->vactive0_end_flag;
+
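+ /* Wait up to 300ms for the next vactive0-end interrupt; if the sleep is
+ * interrupted by a signal (-ERESTARTSYS), retry up to 50 times with a
+ * 10ms delay before reporting a timeout. */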
+REDO:
+ ret = wait_event_interruptible_timeout(ctx->vactive0_end_wq,
+ (prev_vactive0_end != ctx->vactive0_end_flag),
+ msecs_to_jiffies(300));
+ if (ret == -ERESTARTSYS) {
+ if (times < 50) {
+ times++;
+ mdelay(10);
+ goto REDO;
+ }
+ }
+
+ if (ret <= 0) {
+ DRM_ERROR("wait_for vactive0_end_flag timeout! ret=%d.\n", ret);
+
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void hisi_fb_pan_display(struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+
+ struct dss_plane *aplane = to_dss_plane(plane);
+ struct dss_crtc *acrtc = aplane->acrtc;
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ struct kirin_drm_private *priv = plane->dev->dev_private;
+ struct kirin_fbdev *fbdev = to_kirin_fbdev(priv->fbdev);
+
+ bool afbcd = false;
+ bool mmu_enable = true;
+ dss_rect_ltrb_t rect;
+ u32 bpp;
+ u32 stride;
+ u32 display_addr = 0;
+ u32 hal_fmt;
+ int chn_idx = DSS_RCHN_D2;
+
+ int crtc_x = state->crtc_x;
+ int crtc_y = state->crtc_y;
+ unsigned int crtc_w = state->crtc_w;
+ unsigned int crtc_h = state->crtc_h;
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
+ bpp = fb->bits_per_pixel / 8;
+ stride = fb->pitches[0];
+
+ if (fbdev)
+ display_addr = (u32)fbdev->smem_start + src_y * stride;
+ else
+ printk("JDB: fbdev is null?\n");
+
+ rect.left = 0;
+ rect.right = src_w - 1;
+ rect.top = 0;
+ rect.bottom = src_h - 1;
+ hal_fmt = dss_get_format(fb->pixel_format);
+
+ DRM_DEBUG("channel%d: src:(%d,%d, %dx%d) crtc:(%d,%d, %dx%d), rect(%d,%d,%d,%d),"
+ "fb:%dx%d, pixel_format=%d, stride=%d, paddr=0x%x, bpp=%d, bits_per_pixel=%d.\n",
+ chn_idx, src_x, src_y, src_w, src_h,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ rect.left, rect.top, rect.right, rect.bottom,
+ fb->width, fb->height, hal_fmt,
+ stride, display_addr, bpp, fb->bits_per_pixel);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
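+ /* Horizontal/vertical blanking parameters derived from the current mode;
+ * they are not consumed further in this function. */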
+
+ hisi_dss_mctl_mutex_lock(ctx);
+ hisi_dss_aif_ch_config(ctx, chn_idx);
+ hisi_dss_mif_config(ctx, chn_idx, mmu_enable);
+ hisi_dss_smmu_config(ctx, chn_idx, mmu_enable);
+
+ hisi_dss_rdma_config(ctx, &rect, display_addr, hal_fmt, bpp, chn_idx, afbcd, mmu_enable);
+ hisi_dss_rdfc_config(ctx, &rect, hal_fmt, bpp, chn_idx);
+ hisi_dss_ovl_config(ctx, &rect, mode->hdisplay, mode->vdisplay);
+
+ hisi_dss_mctl_ov_config(ctx, chn_idx);
+ hisi_dss_mctl_sys_config(ctx, chn_idx);
+ hisi_dss_mctl_mutex_unlock(ctx);
+ hisi_dss_unflow_handler(ctx, true);
+
+ enable_ldi(acrtc);
+ hisi_dss_wait_for_complete(ctx);
+}
+
+void hisi_dss_online_play(struct drm_plane *plane, drm_dss_layer_t *layer)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+
+ struct dss_plane *aplane = to_dss_plane(plane);
+ struct dss_crtc *acrtc = aplane->acrtc;
+ struct dss_hw_ctx *ctx = acrtc->ctx;
+
+ bool afbcd = false;
+ bool mmu_enable = true;
+ dss_rect_ltrb_t rect;
+ u32 bpp;
+ u32 stride;
+ u32 display_addr;
+
+ int chn_idx = DSS_RCHN_D2;
+ u32 hal_fmt = 0;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+
+ mode = &acrtc->base.state->mode;
+ adj_mode = &acrtc->base.state->adjusted_mode;
+
+ bpp = layer->img.bpp;
+ stride = layer->img.stride;
+ display_addr = layer->img.vir_addr;
+ hal_fmt = layer->img.format;
+
+ rect.left = 0;
+ rect.right = src_w - 1;
+ rect.top = 0;
+ rect.bottom = src_h - 1;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ hisi_dss_mctl_mutex_lock(ctx);
+ hisi_dss_aif_ch_config(ctx, chn_idx);
+ hisi_dss_mif_config(ctx, chn_idx, mmu_enable);
+ hisi_dss_smmu_config(ctx, chn_idx, mmu_enable);
+
+ hisi_dss_rdma_config(ctx, &rect, display_addr, hal_fmt, bpp, chn_idx, afbcd, mmu_enable);
+ hisi_dss_rdfc_config(ctx, &rect, hal_fmt, bpp, chn_idx);
+ hisi_dss_ovl_config(ctx, &rect, mode->hdisplay, mode->vdisplay);
+
+ hisi_dss_mctl_ov_config(ctx, chn_idx);
+ hisi_dss_mctl_sys_config(ctx, chn_idx);
+ hisi_dss_mctl_mutex_unlock(ctx);
+ hisi_dss_unflow_handler(ctx, true);
+
+ enable_ldi(acrtc);
+ hisi_dss_wait_for_complete(ctx);
+}
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_fb.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_fb.c
new file mode 100644
index 000000000000..834c9a381a4a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_fb.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+
+#include "kirin_drm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct kirin_framebuffer {
+ struct drm_framebuffer base;
+};
+#define to_kirin_framebuffer(x) container_of(x, struct kirin_framebuffer, base)
+
+
+static int kirin_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ //struct kirin_framebuffer *kirin_fb = to_kirin_framebuffer(fb);
+ return 0;
+}
+
+static void kirin_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct kirin_framebuffer *kirin_fb = to_kirin_framebuffer(fb);
+
+ DRM_DEBUG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ kfree(kirin_fb);
+}
+
+static int kirin_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs kirin_framebuffer_funcs = {
+ .create_handle = kirin_framebuffer_create_handle,
+ .destroy = kirin_framebuffer_destroy,
+ .dirty = kirin_framebuffer_dirty,
+};
+
+struct drm_framebuffer *kirin_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct kirin_framebuffer *kirin_fb = NULL;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ kirin_fb = kzalloc(sizeof(*kirin_fb), GFP_KERNEL);
+ if (!kirin_fb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &kirin_fb->base;
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &kirin_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DRM_DEBUG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ kfree(kirin_fb);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/hisilicon/kirin960/kirin_fbdev.c b/drivers/gpu/drm/hisilicon/kirin960/kirin_fbdev.c
new file mode 100644
index 000000000000..fe8db4d40f63
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/kirin_fbdev.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include <drm_crtc_helper.h>
+
+#include <linux/ion.h>
+#include <linux/hisi/hisi_ion.h>
+
+#include "kirin_drm_drv.h"
+#include "kirin_dpe_reg.h"
+#include "kirin_drm_dpe_utils.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+//#define CONFIG_HISI_FB_HEAP_CARVEOUT_USED
+
+#define FBDEV_BUFFER_NUM 3
+struct fb_dmabuf_export {
+ __u32 fd;
+ __u32 flags;
+};
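+/* Driver-private ioctl: kirin_dmabuf_export() uses it to hand the fbdev ION
+ * buffer to userspace as a dma-buf fd. */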
+#define FBIOGET_DMABUF _IOR('F', 0x21, struct fb_dmabuf_export)
+
+#define HISIFB_IOCTL_MAGIC 'M'
+#define HISI_DRM_ONLINE_PLAY _IOW(HISIFB_IOCTL_MAGIC, 0x21, struct drm_dss_layer)
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define HISI_FB_ION_CLIENT_NAME "hisi_fb_ion"
+
+unsigned long kirin_alloc_fb_buffer(struct kirin_fbdev *fbdev, int size)
+{
+ struct ion_client *client = NULL;
+ struct ion_handle *handle = NULL;
+ size_t buf_len = 0;
+ unsigned long buf_addr = 0;
+ int shared_fd = -1;
+
+ if (NULL == fbdev) {
+ DRM_ERROR("fbdev is NULL!\n");
+ return 0; /* 0 indicates failure, matching err_return below */
+ }
+
+ client = fbdev->ion_client;
+ handle = fbdev->ion_handle;
+
+ buf_len = size;
+
+ client = hisi_ion_client_create(HISI_FB_ION_CLIENT_NAME);
+ if (!client) {
+ DRM_ERROR("failed to create ion client!\n");
+ return 0;
+ }
+ memset(&fbdev->iommu_format, 0, sizeof(struct iommu_map_format));
+
+#ifdef CONFIG_HISI_FB_HEAP_CARVEOUT_USED
+ handle = ion_alloc(client, buf_len, PAGE_SIZE, ION_HEAP(ION_GRALLOC_HEAP_ID), 0);
+#else
+ handle = ion_alloc(client, buf_len, PAGE_SIZE, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+#endif
+ if (!handle) {
+ DRM_ERROR("failed to ion_alloc!\n");
+ goto err_return;
+ }
+
+ fbdev->screen_base = ion_map_kernel(client, handle);
+ if (!fbdev->screen_base) {
+ DRM_ERROR("failed to ion_map_kernel!\n");
+ goto err_ion_map;
+ }
+
+#ifdef CONFIG_HISI_FB_HEAP_CARVEOUT_USED
+ if (ion_phys(client, handle, &buf_addr, &buf_len) < 0) {
+ DRM_ERROR("failed to get ion phys!\n");
+ goto err_ion_get_addr;
+ }
+#else
+ if (ion_map_iommu(client, handle, &(fbdev->iommu_format))) {
+ DRM_ERROR("failed to ion_map_iommu!\n");
+ goto err_ion_get_addr;
+ }
+
+ buf_addr = fbdev->iommu_format.iova_start;
+#endif
+
+ fbdev->shared_fd = shared_fd;
+ fbdev->smem_start = buf_addr;
+ fbdev->screen_size = buf_len;
+ memset(fbdev->screen_base, 0x0, fbdev->screen_size);
+
+ fbdev->ion_client = client;
+ fbdev->ion_handle = handle;
+
+ return buf_addr;
+
+err_ion_get_addr:
+ ion_unmap_kernel(client, handle);
+err_ion_map:
+ ion_free(client, handle);
+err_return:
+ return 0;
+}
+
+static int kirin_fbdev_mmap(struct fb_info *info, struct vm_area_struct * vma)
+{
+ struct sg_table *table = NULL;
+ struct scatterlist *sg = NULL;
+ struct page *page = NULL;
+ unsigned long remainder = 0;
+ unsigned long len = 0;
+ unsigned long addr = 0;
+ unsigned long offset = 0;
+ unsigned long size = 0;
+ int i = 0;
+ int ret = 0;
+
+ struct drm_fb_helper *helper;
+ struct kirin_fbdev *fbdev;
+
+ if (NULL == info) {
+ DRM_ERROR("info is NULL!\n");
+ return -EINVAL;
+ }
+
+ /* only dereference info->par after the NULL check above */
+ helper = (struct drm_fb_helper *)info->par;
+ fbdev = to_kirin_fbdev(helper);
+
+ if (NULL == fbdev) {
+ DRM_ERROR("fbdev is NULL!\n");
+ return -EINVAL;
+ }
+
+ table = ion_sg_table(fbdev->ion_client, fbdev->ion_handle);
+ if ((table == NULL) || (vma == NULL)) {
+ DRM_ERROR("table or vma is NULL!\n");
+ return -EFAULT;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ addr = vma->vm_start;
+ offset = vma->vm_pgoff * PAGE_SIZE;
+ size = vma->vm_end - vma->vm_start;
+
+ if (size > info->fix.smem_len) {
+ DRM_ERROR("size=%lu is out of range(%u)!\n", size, info->fix.smem_len);
+ return -EFAULT;
+ }
+
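+ /* Walk the ION buffer's scatterlist and remap each chunk into the VMA,
+ * honouring the page offset requested via vm_pgoff. */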
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ page = sg_page(sg);
+ remainder = vma->vm_end - addr;
+ len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret != 0) {
+ DRM_ERROR("failed to remap_pfn_range! ret=%d\n", ret);
+ }
+
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+
+ return 0;
+}
+
+static int kirin_dmabuf_export(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ struct drm_fb_helper *helper;
+ struct kirin_fbdev *fbdev;
+ struct fb_dmabuf_export dmabuf_export;
+
+ helper = (struct drm_fb_helper *)info->par;
+ fbdev = to_kirin_fbdev(helper);
+
+ ret = copy_from_user(&dmabuf_export, argp, sizeof(struct fb_dmabuf_export));
+ if (ret) {
+ DRM_ERROR("copy for user failed!ret=%d.\n", ret);
+ ret = -EINVAL;
+ } else {
+ int fd;
+
+ dmabuf_export.flags = 0;
+ /* keep the fd signed so the error check below actually works */
+ fd = ion_share_dma_buf_fd(fbdev->ion_client, fbdev->ion_handle);
+ if (fd < 0)
+ DRM_ERROR("failed to ion_share!\n");
+ dmabuf_export.fd = fd;
+
+ ret = copy_to_user(argp, &dmabuf_export, sizeof(struct fb_dmabuf_export));
+ if (ret) {
+ DRM_ERROR("copy_to_user failed! ret=%d.\n", ret);
+ ret = -EFAULT;
+ }
+ }
+
+ return ret;
+}
+
+static int kirin_dss_online_compose(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ struct drm_fb_helper *helper;
+ struct kirin_drm_private *priv;
+ struct drm_plane *plane;
+
+ struct drm_dss_layer layer;
+
+ helper = (struct drm_fb_helper *)info->par;
+ priv = helper->dev->dev_private;
+ plane = priv->crtc[0]->primary;
+
+ ret = copy_from_user(&layer, argp, sizeof(struct drm_dss_layer));
+ if (ret) {
+ DRM_ERROR("copy for user failed!ret=%d.\n", ret);
+ return -EINVAL;
+ }
+
+ hisi_dss_online_play(plane, &layer);
+
+ return ret;
+}
+
+static int kirin_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ void __user *argp = (void __user *)arg;
+
+ if (NULL == info) {
+ DRM_ERROR("info is NULL!\n");
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case FBIOGET_DMABUF:
+ ret = kirin_dmabuf_export(info, argp);
+ break;
+ case HISI_DRM_ONLINE_PLAY:
+ ret = kirin_dss_online_compose(info, argp);
+ break;
+ case FBIO_WAITFORVSYNC:
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (ret == -ENOSYS)
+ DRM_ERROR("unsupported ioctl (%x)\n", cmd);
+
+ return ret;
+}
+
+
+static struct fb_ops kirin_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = kirin_fbdev_mmap,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+
+ .fb_ioctl = kirin_fb_ioctl,
+ .fb_compat_ioctl = kirin_fb_ioctl,
+};
+
+static int kirin_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct kirin_fbdev *fbdev = to_kirin_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ int ret, size;
+ unsigned int bytes_per_pixel;
+
+ DRM_DEBUG("create fbdev: %dx%d@%d (%dx%d)\n", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
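+ /* The surface is FBDEV_BUFFER_NUM (3) screens tall so the legacy fbdev
+ * interface can pan between multiple buffers. */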
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height * FBDEV_BUFFER_NUM;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ //mode_cmd.pitches[0] = align_pitch(mode_cmd.width, sizes->surface_bpp);
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DRM_DEBUG("allocating %d bytes for fb %d", size, dev->primary->index);
+
+ fb = kirin_framebuffer_init(dev, &mode_cmd);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbdev->ion_client = NULL;
+ fbdev->ion_handle = NULL;
+ fbdev->screen_base = NULL;
+ fbdev->smem_start = 0;
+ fbdev->screen_size = 0;
+ memset(&fbdev->iommu_format, 0, sizeof(struct iommu_map_format));
+
+ kirin_alloc_fb_buffer(fbdev, size);
+
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = PTR_ERR(fbi);
+ goto fail_unlock;
+ }
+
+ DRM_DEBUG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &kirin_fb_ops;
+
+ strcpy(fbi->fix.id, "dss");
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = fbdev->smem_start;
+ fbi->screen_base = fbdev->screen_base;
+ fbi->screen_size = fbdev->screen_size;
+ fbi->fix.smem_start = fbdev->smem_start;
+ fbi->fix.smem_len = fbdev->screen_size;
+
+ DRM_DEBUG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DRM_DEBUG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+ if (ret) {
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+ }
+ }
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs kirin_fb_helper_funcs = {
+ .fb_probe = kirin_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *kirin_drm_fbdev_init(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct kirin_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ goto fail;
+
+ priv->fb_helper = helper = &fbdev->fb_helper;
+
+ drm_fb_helper_prepare(dev, helper, &kirin_fb_helper_funcs);
+
+ DRM_INFO("num_crtc=%d, num_connector=%d.\n",
+ dev->mode_config.num_crtc, dev->mode_config.num_connector);
+
+ ret = drm_fb_helper_init(dev, helper,
+ dev->mode_config.num_crtc, dev->mode_config.num_connector);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret)
+ goto fini;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fini:
+ drm_fb_helper_fini(helper);
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void kirin_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct kirin_fbdev *fbdev;
+
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_kirin_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/hisilicon/kirin960/panel/panel-hikey960-nte300nts.c b/drivers/gpu/drm/hisilicon/kirin960/panel/panel-hikey960-nte300nts.c
new file mode 100644
index 000000000000..51a99b305d07
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin960/panel/panel-hikey960-nte300nts.c
@@ -0,0 +1,402 @@
+/*
+ * HiKey LCD panel driver
+ * TODO: Add backlight adjustment support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <video/mipi_display.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#define REGFLAG_DELAY 0XFFE
+
+struct hikey_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ bool prepared;
+ bool enabled;
+
+ struct gpio_desc *gpio_pwr_en;
+ struct gpio_desc *gpio_bl_en;
+ struct gpio_desc *gpio_pwm;
+
+ struct regulator *vdd;
+};
+
+struct dsi_panel_cmd {
+ u32 cmd; /* cmd: DCS command */
+ u32 len; /* command payload length */
+ u8 data[64]; /* buffer containing the command payload */
+};
+
+static struct dsi_panel_cmd nte300nts_init_cmds[] = {
+ {0x01, 0, {0x00} },
+ {REGFLAG_DELAY, 5, {} },
+
+ {0xB0, 1, {0x00} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0xD6, 1, {0x01} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0xB3, 5, {0x14, 0x08, 0x00, 0x22, 0x00} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0xB4, 1, {0x0C} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0xB6, 2, {0x3A, 0xC3} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x2A, 4, {0x00, 0x00, 0X04, 0XAF} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x2B, 4, {0x00, 0x00, 0X07, 0X7F} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x51, 1, {0xA6} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x53, 1, {0x2C} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x3A, 1, {0x66} },
+ {REGFLAG_DELAY, 2, {} },
+
+ {0x29, 0, {0x00} },
+ {REGFLAG_DELAY, 20, {} },
+
+ {0x11, 0, {0x00} },
+ {REGFLAG_DELAY, 150, {} },
+};
+
+static struct dsi_panel_cmd nte300nts_off_cmds[] = {
+ {0x28, 0, {0x00} },
+ {REGFLAG_DELAY, 20, {} },
+
+ {0x10, 0, {0x00} },
+ {REGFLAG_DELAY, 80, {} },
+};
+
+static int hikey_panel_write_cmds(struct mipi_dsi_device *dsi,
+ struct dsi_panel_cmd *cmds,
+ u32 count)
+{
+ struct dsi_panel_cmd *cmd;
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ cmd = &cmds[i];
+ switch (cmd->cmd) {
+ case REGFLAG_DELAY:
+ msleep(cmd->len);
+ break;
+ default:
+ ret = mipi_dsi_dcs_write(dsi, cmd->cmd, cmd->data,
+ cmd->len);
+ }
+ }
+
+ return ret;
+}
+
+static inline struct hikey_panel *to_hikey_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hikey_panel, base);
+}
+
+static int hikey_panel_unprepare(struct drm_panel *p)
+{
+ struct hikey_panel *panel = to_hikey_panel(p);
+
+ if (!panel->prepared)
+ return 0;
+
+ gpiod_set_value(panel->gpio_bl_en, 0);
+ gpiod_set_value(panel->gpio_pwm, 0);
+
+ panel->prepared = false;
+
+ return 0;
+}
+
+static int hikey_panel_prepare(struct drm_panel *p)
+{
+ struct hikey_panel *panel = to_hikey_panel(p);
+ int ret;
+
+ if (panel->prepared)
+ return 0;
+
+ /*
+ * A minimum delay of 250ms is required after power-up until commands
+ * can be sent
+ */
+ msleep(250);
+
+ /* init the panel */
+ ret = hikey_panel_write_cmds(panel->dsi, nte300nts_init_cmds,
+ ARRAY_SIZE(nte300nts_init_cmds));
+ if (ret < 0)
+ return ret;
+
+ panel->prepared = true;
+
+ return 0;
+}
+
+static int hikey_panel_disable(struct drm_panel *p)
+{
+ struct hikey_panel *panel = to_hikey_panel(p);
+ int ret;
+
+ if (!panel->enabled)
+ return 0;
+
+ ret = hikey_panel_write_cmds(panel->dsi, nte300nts_off_cmds,
+ ARRAY_SIZE(nte300nts_off_cmds));
+ if (ret < 0)
+ return ret;
+
+ panel->enabled = false;
+
+ return 0;
+}
+
+static int hikey_panel_enable(struct drm_panel *p)
+{
+ struct hikey_panel *panel = to_hikey_panel(p);
+
+ if (panel->enabled)
+ return 0;
+
+ msleep(200);
+ gpiod_set_value(panel->gpio_bl_en, 1);
+ gpiod_set_value(panel->gpio_pwm, 1);
+
+ panel->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 144000,
+
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 200,
+ .hsync_end = 1200 + 200 + 12,
+ .htotal = 1200 + 12 + 60 + 200,
+
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 8,
+ .vsync_end = 1920 + 8 + 2,
+ .vtotal = 1920 + 2 + 8 + 8,
+};
+
+static int hikey_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 94;
+ panel->connector->display_info.height_mm = 151;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hikey_panel_funcs = {
+ .get_modes = hikey_panel_get_modes,
+ .enable = hikey_panel_enable,
+ .disable = hikey_panel_disable,
+ .prepare = hikey_panel_prepare,
+ .unprepare = hikey_panel_unprepare,
+};
+
+static int hikey_panel_add(struct hikey_panel *panel)
+{
+ struct device *dev = &panel->dsi->dev;
+ int ret;
+
+ drm_panel_init(&panel->base);
+ panel->base.funcs = &hikey_panel_funcs;
+ panel->base.dev = dev;
+
+ ret = drm_panel_add(&panel->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void hikey_panel_del(struct hikey_panel *panel)
+{
+ if (panel->base.dev)
+ drm_panel_remove(&panel->base);
+}
+
+static int hikey_panel_parse_dt(struct hikey_panel *panel)
+{
+ struct device *dev = &panel->dsi->dev;
+ int ret = 0;
+
+ panel->gpio_pwr_en =
+ devm_gpiod_get_optional(dev, "pwr-en", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->gpio_pwr_en))
+ return PTR_ERR(panel->gpio_pwr_en);
+
+ panel->gpio_bl_en =
+ devm_gpiod_get_optional(dev, "bl-en", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->gpio_bl_en))
+ return PTR_ERR(panel->gpio_bl_en);
+
+ panel->gpio_pwm =
+ devm_gpiod_get_optional(dev, "pwm", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->gpio_pwm))
+ return PTR_ERR(panel->gpio_pwm);
+
+ panel->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(panel->vdd)) {
+ ret = PTR_ERR(panel->vdd);
+ return ret;
+ }
+
+ ret = regulator_set_voltage(panel->vdd, 1800000, 1800000);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(panel->vdd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hikey_panel_attach_dsi(struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ dsi->phy_clock = 864000; /* in kHz */
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ DRM_ERROR("failed to attach dsi to host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hikey_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hikey_panel *panel;
+ int ret;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->dsi = dsi;
+ ret = hikey_panel_parse_dt(panel);
+ if (ret)
+ return ret;
+
+ ret = hikey_panel_add(panel);
+ if (ret)
+ return ret;
+
+ ret = hikey_panel_attach_dsi(dsi);
+ if (ret) {
+ hikey_panel_del(panel);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, panel);
+
+ return 0;
+}
+
+static int hikey_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct hikey_panel *panel = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = hikey_panel_disable(&panel->base);
+ if (ret < 0)
+ DRM_ERROR("failed to disable panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_ERROR("failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_detach(&panel->base);
+ hikey_panel_del(panel);
+
+ return 0;
+}
+
+static void hikey_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct hikey_panel *panel = mipi_dsi_get_drvdata(dsi);
+
+ hikey_panel_disable(&panel->base);
+}
+
+static const struct of_device_id panel_of_match[] = {
+ { .compatible = "hisilicon,mipi-hikey", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+static struct mipi_dsi_driver hikey_panel_driver = {
+ .driver = {
+ .name = "hikey-lcd-panel",
+ .of_match_table = panel_of_match,
+ },
+ .probe = hikey_panel_probe,
+ .remove = hikey_panel_remove,
+ .shutdown = hikey_panel_shutdown,
+};
+module_mipi_dsi_driver(hikey_panel_driver);
+
+MODULE_DESCRIPTION("NTE300NTS (1920x1200) video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hisi/Kconfig b/drivers/hisi/Kconfig
new file mode 100644
index 000000000000..b75cccc5a038
--- /dev/null
+++ b/drivers/hisi/Kconfig
@@ -0,0 +1,17 @@
+menu "Hisilicon platform"
+
+menuconfig HISILICON_PLATFORM
+ bool "Hisilicon platform support"
+ default n
+ help
+ Say yes here to enable support for the HiSilicon platform.
+
+if HISILICON_PLATFORM
+
+source "drivers/hisi/mailbox/Kconfig"
+source "drivers/hisi/hifi_dsp/Kconfig"
+source "drivers/hisi/hifi_mailbox/Kconfig"
+
+endif #HISILICON_PLATFORM
+
+endmenu
diff --git a/drivers/hisi/Makefile b/drivers/hisi/Makefile
new file mode 100644
index 000000000000..c2f75581457a
--- /dev/null
+++ b/drivers/hisi/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_HISILICON_PLATFORM_MAILBOX) += mailbox/
+obj-$(CONFIG_HIFI_DSP_ONE_TRACK) += hifi_dsp/
+obj-$(CONFIG_HIFI_MAILBOX) += hifi_mailbox/
+
diff --git a/drivers/hisi/ap/platform/hi3660/global_ddr_map.h b/drivers/hisi/ap/platform/hi3660/global_ddr_map.h
new file mode 100644
index 000000000000..42a7571e58a6
--- /dev/null
+++ b/drivers/hisi/ap/platform/hi3660/global_ddr_map.h
@@ -0,0 +1,62 @@
+#ifndef _HI_GLOBAL_MEM_MAP_INCLUDE_H_
+#define _HI_GLOBAL_MEM_MAP_INCLUDE_H_
+#define HISI_RESERVED_FASTBOOT_PHYMEM_BASE HISI_RESERVED_ISP_BOOT_PHYMEM_BASE
+#define HISI_RESERVED_FASTBOOT_PHYMEM_SIZE (0x400000)
+#define HISI_RESERVED_FASTBOOT_DTB_PHYMEM_BASE (0x16C00000)
+#define HISI_RESERVED_FASTBOOT_DTB_PHYMEM_SIZE (0x4000000)
+#define HISI_RESERVED_DTB_PHYMEM_BASE 0x07A00000
+#define HISI_RESERVED_DTB_PHYMEM_SIZE (0x07C00000 - 0x07A00000)
+#define HISI_RESERVED_ISP_BOOT_PHYMEM_BASE 0x1AC00000
+#define HISI_RESERVED_ISP_BOOT_PHYMEM_SIZE (0x2500000)
+#define HISI_RESERVED_IVP_PHYMEM_BASE 0x1D100000
+#define HISI_RESERVED_IVP_PHYMEM_SIZE (0x100000)
+#define HISI_RESERVED_SECOS_PHYMEM_BASE 0x1D200000
+#define HISI_RESERVED_SECOS_PHYMEM_SIZE (0x3000000)
+#define HISI_RESERVED_BL31_PHYMEM_BASE 0x20200000
+#define HISI_RESERVED_BL31_PHYMEM_SIZE (0x200000)
+#define HISI_RESERVED_SENSORHUB_PHYMEM_BASE 0x20400000
+#define HISI_RESERVED_SENSORHUB_PHYMEM_SIZE (0x600000)
+#define HISI_RESERVED_PSTORE_PHYMEM_BASE 0x20A00000
+#define HISI_RESERVED_PSTORE_PHYMEM_SIZE (0x100000)
+#define HISI_RESERVED_FAST_KER_AND_PHYMEM_BASE 0x20B00000
+#define HISI_RESERVED_FAST_KER_AND_PHYMEM_SIZE (0x40000)
+#define HISI_SUB_RESERVED_FASTBOOT_LOG_PYHMEM_BASE 0x20B00000
+#define HISI_SUB_RESERVED_FASTBOOT_LOG_PYHMEM_SIZE (0x20000)
+#define HISI_SUB_RESERVED_SCHARGE_PYHMEM_BASE 0x20B20000
+#define HISI_SUB_RESERVED_SCHARGE_PYHMEM_SIZE (0x1000)
+#define HISI_SUB_RESERVED_BL31_SHARE_MEM_PHYMEM_BASE 0x20B21000
+#define HISI_SUB_RESERVED_BL31_SHARE_MEM_PHYMEM_SIZE (0x10000)
+#define HISI_SUB_RESERVED_LCD_GAMMA_MEM_PHYMEM_BASE 0x20B31000
+#define HISI_SUB_RESERVED_LCD_GAMMA_MEM_PHYMEM_SIZE (0x1000)
+#define HISI_SUB_RESERVED_UNUSED_PHYMEM_BASE 0x20B32000
+#define HISI_SUB_RESERVED_UNUSED_PHYMEM_SIZE (0x20B40000 - 0x20B32000)
+#define HISI_RESERVED_SENSORHUB_SHMEM_PHYMEM_BASE 0x20B40000
+#define HISI_RESERVED_SENSORHUB_SHMEM_PHYMEM_SIZE (0x40000)
+#define HISI_CONTEXTHUB_THERMAL_SHMEM_PHYMEM_BASE HISI_RESERVED_SENSORHUB_SHMEM_PHYMEM_BASE
+#define HISI_CONTEXTHUB_THERMAL_SHMEM_PHYMEM_SIZE (0x3C00)
+#define HISI_RESERVED_CH_BLOCK_SHMEM_PHYMEM_BASE (HISI_CONTEXTHUB_THERMAL_SHMEM_PHYMEM_BASE + HISI_CONTEXTHUB_THERMAL_SHMEM_PHYMEM_SIZE)
+#define HISI_RESERVED_CH_BLOCK_SHMEM_PHYMEM_SIZE (HISI_RESERVED_SENSORHUB_SHMEM_PHYMEM_SIZE - HISI_CONTEXTHUB_THERMAL_SHMEM_PHYMEM_SIZE)
+#define HISI_RESERVED_SENSORHUB_SHARE_MEM_PHYMEM_BASE 0x20B80000
+#define HISI_RESERVED_SENSORHUB_SHARE_MEM_PHYMEM_SIZE (0x80000)
+#define HISI_RESERVED_MODEM_PHYMEM_BASE 0x80000000
+#define HISI_RESERVED_MODEM_PHYMEM_SIZE (0x9200000)
+#define HISI_RESERVED_HIFI_PHYMEM_BASE 0x89200000
+#define HISI_RESERVED_HIFI_PHYMEM_SIZE (0x980000)
+#define HISI_RESERVED_LPMX_CORE_PHYMEM_BASE 0x89B80000
+#define HISI_RESERVED_LPMX_CORE_PHYMEM_SIZE (0x100000)
+#define HISI_RESERVED_LPMCU_PHYMEM_BASE 0x89C80000
+#define HISI_RESERVED_LPMCU_PHYMEM_SIZE 0x40000
+#define HISI_RESERVED_MODEM_SHARE_PHYMEM_BASE 0x89D00000
+#define HISI_RESERVED_MODEM_SHARE_PHYMEM_SIZE (0x500000)
+#define HISI_RESERVED_MODEM_SOCP_PHYMEM_BASE 0x8A200000
+#define HISI_RESERVED_MODEM_SOCP_PHYMEM_SIZE (0x1100000)
+#define HISI_RESERVED_HIFI_DATA_PHYMEM_BASE 0x8B300000
+#define HISI_RESERVED_HIFI_DATA_PHYMEM_SIZE (0x380000)
+#define HISI_RESERVED_MNTN_PHYMEM_BASE 0x8B680000
+#define HISI_RESERVED_MNTN_PHYMEM_SIZE (0x800000)
+#define HISI_RESERVED_DDR_TRAINING1_PHYMEM_BASE 0x07B00000
+#define HISI_RESERVED_DDR_TRAINING1_PHYMEM_SIZE (0x07B08000 - 0x07B00000)
+#define HISI_RESERVED_LPMX_CORE_PHYMEM_BASE_UNIQUE (HISI_RESERVED_LPMX_CORE_PHYMEM_BASE)
+#define HISI_RESERVED_LPMCU_PHYMEM_BASE_UNIQUE (HISI_RESERVED_LPMCU_PHYMEM_BASE)
+#define HISI_RESERVED_MNTN_PHYMEM_BASE_UNIQUE (HISI_RESERVED_MNTN_PHYMEM_BASE)
+#endif
diff --git a/drivers/hisi/ap/platform/hi3660/mntn_public_interface.h b/drivers/hisi/ap/platform/hi3660/mntn_public_interface.h
new file mode 100644
index 000000000000..e8396cca4142
--- /dev/null
+++ b/drivers/hisi/ap/platform/hi3660/mntn_public_interface.h
@@ -0,0 +1,411 @@
+#ifndef __MNTN_PUBLIC_INTERFACE_H__
+#define __MNTN_PUBLIC_INTERFACE_H__
+#include "soc_acpu_baseaddr_interface.h"
+#include "global_ddr_map.h"
+typedef unsigned long long u64;
+typedef unsigned int u32;
+#define PMU_RESET_REG_OFFSET (PMIC_HRST_REG0_ADDR(0)<<2)
+#define RST_FLAG_MASK (0xFF)
+#define PMU_RESET_VALUE_USED 0xFFFFFF00
+#define PMU_RESET_RECORD_DDR_AREA_SIZE 0x100
+#define RECORD_PC_STR_MAX_LENGTH 0x48
+typedef struct {
+ char exception_info[RECORD_PC_STR_MAX_LENGTH];
+ unsigned long exception_info_len;
+}
+AP_RECORD_PC;
+#define ETR_MAGIC_START "ETRTRACE"
+#define ETR_MAGIC_SIZE ((unsigned int)sizeof(ETR_MAGIC_START))
+typedef struct {
+ char magic[ETR_MAGIC_SIZE];
+ u64 paddr;
+ u32 size;
+ u32 rd_offset;
+}
+AP_RECORD_ETR;
+#define BOARD_COLD_START_ADDR ((HISI_RESERVED_MNTN_PHYMEM_BASE_UNIQUE) + 0x280)
+#define FPGA_RESET_REG_ADDR ((HISI_RESERVED_MNTN_PHYMEM_BASE_UNIQUE) + 0x288)
+#define FPGA_BOOTUP_KEYPOINT_ADDR ((HISI_RESERVED_MNTN_PHYMEM_BASE_UNIQUE) + 0x290)
+#define BOOTUP_KEYPOINT_OFFSET (PMIC_HRST_REG1_ADDR(0)<<2)
+#define DFX_HEAD_SIZE 512
+#define TOTAL_NUMBER 5
+#define FASTBOOTLOG_SIZE HISI_SUB_RESERVED_FASTBOOT_LOG_PYHMEM_SIZE
+#define LAST_KMSG_SIZE (HISI_RESERVED_PSTORE_PHYMEM_SIZE/2)
+#define LAST_APPLOG_SIZE (HISI_RESERVED_PSTORE_PHYMEM_SIZE/8)
+#define EVERY_NUMBER_SIZE (DFX_HEAD_SIZE + LAST_APPLOG_SIZE + LAST_KMSG_SIZE + FASTBOOTLOG_SIZE)
+#define DFX_MAGIC_NUMBER 0x2846579
+#define DFX_USED_SIZE (EVERY_NUMBER_SIZE*(TOTAL_NUMBER+1)+DFX_HEAD_SIZE)
+struct dfx_head_info {
+ int magic;
+ int total_number;
+ int cur_number;
+ int need_save_number;
+ u64 every_number_addr[TOTAL_NUMBER];
+ u64 every_number_size;
+ u64 temp_number_addr;
+};
+struct every_number_info {
+ u64 rtc_time;
+ u64 bootup_keypoint;
+ u64 reboot_type;
+ u64 fastbootlog_start_addr;
+ u64 fastbootlog_size;
+ u64 last_kmsg_start_addr;
+ u64 last_kmsg_size;
+ u64 last_applog_start_addr;
+ u64 last_applog_size;
+};
+enum boot_stage_point {
+ STAGE_START = 1,
+ STAGE_XLOADER_START = STAGE_START,
+ STAGE_XLOADER_EMMC_INIT_FAIL = 2,
+ STAGE_XLOADER_EMMC_INIT_OK = 3,
+ STAGE_XLOADER_DDR_INIT_FAIL = 4,
+ STAGE_XLOADER_DDR_INIT_OK = 5,
+ STAGE_XLOADER_RD_VRL_FAIL = 6,
+ STAGE_XLOADER_CHECK_VRL_ERROR = 7,
+ STAGE_XLOADER_IMG_TOO_LARGE = 8,
+ STAGE_XLOADER_READ_FASTBOOT_FAIL = 9,
+ STAGE_XLOADER_LOAD_HIBENCH_FAIL = 10,
+ STAGE_XLOADER_SEC_VERIFY_FAIL = 11,
+ STAGE_XLOADER_GET_IMGSIZE_FAIL = 12,
+ STAGE_XLOADER_IMGSIZE_ERROR = 13,
+ STAGE_XLOADER_VRL_CHECK_ERROR = 14,
+ STAGE_XLOADER_SECURE_VERIFY_ERROR = 15,
+ STAGE_XLOADER_READ_UCE_FAIL = 16,
+ STAGE_XLOADER_UCE_SEC_VERIFY_FAIL = 17,
+ STAGE_XLOADER_UCE_SECURE_VERIFY_ERROR = 18,
+ STAGE_XLOADER_VECTOR_SEC_VERIFY_FAIL = 19,
+ STAGE_XLOADER_LOAD_FASTBOOT_START = 20,
+ STAGE_XLOADER_LOAD_FASTBOOT_END = 21,
+ STAGE_XLOADER_END = 25,
+ STAGE_FASTBOOT_START = 26,
+ STAGE_FASTBOOT_EMMC_INIT_START = 27,
+ STAGE_FASTBOOT_EMMC_INIT_FAIL = 28,
+ STAGE_FASTBOOT_EMMC_INIT_OK = 29,
+ STAGE_FASTBOOT_DDR_INIT_START = 30,
+ STAGE_FASTBOOT_DISPLAY_INIT_START = 31,
+ STAGE_FASTBOOT_PRE_BOOT_INIT_START = 32,
+ STAGE_FASTBOOT_LD_OTHER_IMGS_START = 33,
+ STAGE_FASTBOOT_LD_KERNEL_IMG_START = 34,
+ STAGE_FASTBOOT_BOOT_KERNEL_START = 35,
+ STAGE_FASTBOOT_LOADLPMCU_FAIL = 38,
+ STAGE_FASTBOOT_SECBOOT_INIT_START = 39,
+ STAGE_FASTBOOT_END = 70,
+ STAGE_KERNEL_EARLY_INITCALL = 75,
+ STAGE_KERNEL_PURE_INITCALL = 77,
+ STAGE_KERNEL_CORE_INITCALL = 79,
+ STAGE_KERNEL_CORE_INITCALL_SYNC = 81,
+ STAGE_KERNEL_POSTCORE_INITCALL = 83,
+ STAGE_KERNEL_POSTCORE_INITCALL_SYNC = 85,
+ STAGE_KERNEL_ARCH_INITCALL = 87,
+ STAGE_KERNEL_ARCH_INITCALLC = 89,
+ STAGE_KERNEL_SUBSYS_INITCALL = 81,
+ STAGE_KERNEL_SUBSYS_INITCALL_SYNC = 83,
+ STAGE_KERNEL_FS_INITCALL = 85,
+ STAGE_KERNEL_FS_INITCALL_SYNC = 87,
+ STAGE_KERNEL_ROOTFS_INITCALL = 89,
+ STAGE_KERNEL_DEVICE_INITCALL = 91,
+ STAGE_KERNEL_DEVICE_INITCALL_SYNC = 93,
+ STAGE_KERNEL_LATE_INITCALL = 95,
+ STAGE_KERNEL_LATE_INITCALL_SYNC = 97,
+ STAGE_KERNEL_CONSOLE_INITCALL = 99,
+ STAGE_KERNEL_SECURITY_INITCALL = 101,
+ STAGE_KERNEL_BOOTANIM_COMPLETE = 103,
+ STAGE_INIT_INIT_START = 110,
+ STAGE_INIT_ON_EARLY_INIT = 111,
+ STAGE_INIT_ON_INIT = 112,
+ STAGE_INIT_ON_EARLY_FS = 113,
+ STAGE_INIT_ON_FS = 114,
+ STAGE_INIT_ON_POST_FS = 115,
+ STAGE_INIT_ON_POST_FS_DATA = 116,
+ STAGE_INIT_ON_EARLY_BOOT = 117,
+ STAGE_INIT_ON_BOOT = 118,
+ STAGE_ANDROID_ZYGOTE_START = 150,
+ STAGE_ANDROID_VM_START = 151,
+ STAGE_ANDROID_PHASE_WAIT_FOR_DEFAULT_DISPLAY = 152,
+ STAGE_ANDROID_PHASE_LOCK_SETTINGS_READY = 153,
+ STAGE_ANDROID_PHASE_SYSTEM_SERVICES_READY = 154,
+ STAGE_ANDROID_PHASE_ACTIVITY_MANAGER_READY = 155,
+ STAGE_ANDROID_PHASE_THIRD_PARTY_APPS_CAN_START = 156,
+ STAGE_ANDROID_BOOT_SUCCESS = 250,
+ STAGE_BOOTUP_END = STAGE_ANDROID_BOOT_SUCCESS,
+ STAGE_END = 255,
+};
+enum himntnEnum {
+ HIMNTN_NVE_VALID = 0,
+ HIMNTN_WDT_MIN,
+ HIMNTN_AP_WDT = HIMNTN_WDT_MIN,
+ HIMNTN_GLOBAL_WDT,
+ HIMNTN_MODEM_WDT,
+ HIMNTN_LPM3_WDT,
+ HIMNTN_IOM3_WDT,
+ HIMNTN_HIFI_WDT,
+ HIMNTN_SECOS_WDT,
+ HIMNTN_ISP_WDT,
+ HIMNTN_IVP_WDT,
+ HIMNTN_OCBC_WDT = 10,
+ HIMNTN_UCE_WDT,
+ HIMNTN_RESERVED_WDT3,
+ HIMNTN_WDT_MAX = HIMNTN_RESERVED_WDT3,
+ HIMNTN_FST_DUMP_MEM,
+ HIMNTN_MNTN_DUMP_MEM,
+ HIMNTN_SD2JTAG,
+ HIMNTN_PRESS_KEY_TO_FASTBOOT,
+ HIMNTN_PANIC_INTO_LOOP,
+ HIMNTN_GOBAL_RESETLOG,
+ HIMNTN_NOC_INT_HAPPEN,
+ HIMNTN_NOC_ERROR_REBOOT = 20,
+ HIMNTN_DFXPARTITION_TO_FILE,
+ HIMNTN_DDR_ERROR_REBOOT,
+ HIMNTN_HISEE,
+ HIMNTN_WATCHPOINT_EN,
+ HIMNTN_KMEMLEAK_SWITCH,
+ HIMNTN_FB_PANIC_REBOOT,
+ HIMNTN_MEM_TRACE = 27,
+ HIMNTN_FTRACE,
+ HIMNTN_EAGLE_EYE,
+ HIMNTN_KERNEL_DUMP_ENABLE = 30,
+ HIMNTN_SD2DJTAG,
+ HIMNTN_MMC_TRACE,
+ HIMNTN_LPM3_PANIC_INTO_LOOP,
+ HIMNTN_TRACE_CLK_REGULATOR,
+ HIMNTN_CORESIGHT,
+ HIMNTN_RESERVED3,
+ HIMNTN_RESERVED4,
+ HIMNTN_RESERVED5,
+ HIMNTN_BOTTOM
+};
+typedef enum {
+ AP_S_COLDBOOT = 0x0,
+ BOOTLOADER = 0x01,
+ RECOVERY = 0x02,
+ RESETFACTORY = 0x03,
+ RESETUSER = 0x04,
+ SDUPDATE = 0x05,
+ CHARGEREBOOT = 0x06,
+ RESIZE = 0x07,
+ ERECOVERY = 0x08,
+ USBUPDATE = 0x09,
+ CUST = 0x0a,
+ OEM_RTC = 0x0c,
+ RESERVED5 = 0x0d,
+ MOUNTFAIL = 0x0e,
+ HUNGDETECT = 0x0f,
+ COLDBOOT = 0x10,
+ RESERVED1 = 0x11,
+ AP_S_FASTBOOTFLASH = 0x13,
+ REBOOT_REASON_LABEL1 = 0x14,
+ AP_S_ABNORMAL = REBOOT_REASON_LABEL1,
+ AP_S_TSENSOR0 = 0x15,
+ AP_S_TSENSOR1 = 0x16,
+ AP_S_AWDT = 0x17,
+ LPM3_S_GLOBALWDT = 0x18,
+ G3D_S_G3DTSENSOR = 0x19,
+ LPM3_S_LPMCURST = 0x1a,
+ CP_S_CPTSENSOR = 0x1b,
+ IOM3_S_IOMCURST = 0x1c,
+ ASP_S_ASPWD = 0x1d,
+ CP_S_CPWD = 0x1e,
+ IVP_S_IVPWD = 0x1f,
+ ISP_S_ISPWD = 0x20,
+ AP_S_DDR_UCE_WD = 0x21,
+ AP_S_DDR_FATAL_INTER = 0X22,
+ OCBC_S_WD = 0x23,
+ REBOOT_REASON_LABEL2 = 0x24,
+ AP_S_PANIC = REBOOT_REASON_LABEL2,
+ AP_S_NOC = 0x25,
+ RESERVED2 = 0x26,
+ AP_S_DDRC_SEC = 0x27,
+ AP_S_F2FS = 0x28,
+ AP_S_COMBINATIONKEY = 0x29,
+ RESERVED6 = 0x2a,
+ AP_S_MAILBOX = 0x2b,
+ REBOOT_REASON_LABEL3 = 0x2c,
+ CP_S_MODEMDMSS = REBOOT_REASON_LABEL3,
+ CP_S_MODEMNOC = 0x2d,
+ CP_S_MODEMAP = 0x2e,
+ CP_S_EXCEPTION = 0x2f,
+ CP_S_RESETFAIL = 0x30,
+ CP_S_NORMALRESET = 0x31,
+ LPM3_S_EXCEPTION = 0x32,
+ SOCHIFI_S_EXCEPTION = 0x33,
+ HIFI_S_RESETFAIL = 0x34,
+ ISP_S_EXCEPTION = 0x35,
+ IVP_S_EXCEPTION = 0x36,
+ IOM3_S_EXCEPTION = 0x37,
+ TEE_S_EXCEPTION = 0x38,
+ MMC_S_EXCEPTION = 0x39,
+ CODECHIFI_S_EXCEPTION = 0x3a,
+ CP_S_RILD_EXCEPTION = 0x3b,
+ CP_S_3RD_EXCEPTION = 0x3c,
+ IOM3_S_USER_EXCEPTION = 0x3d,
+ HISEE_S_EXCEPTION = 0x3e,
+ REBOOT_REASON_LABEL4 = 0x40,
+ RESERVED4 = REBOOT_REASON_LABEL4,
+ BR_KEY_VOLUMN_DOWN_UP_UPDATE_USB = 0x41,
+ BR_KEY_VOLUMN_DOWN_UP_UPDATE_SD_FORCE = 0x42,
+ BR_KEY_VOLUMN_UP = 0x43,
+ BR_KEY_POWERON_PRESS_1S = 0x44,
+ BR_KEY_POWERON_PRESS_10S = 0x45,
+ BR_CHECKPOINT_RECOVERY = 0x46,
+ BR_CHECKPOINT_ERECOVERY = 0x47,
+ BR_CHECKPOINT_SDUPDATE = 0x48,
+ BR_CHECKPOINT_USBUPDATE = 0x49,
+ BR_CHECKPOINT_RESETFACTORY = 0x4a,
+ BR_CHECKPOINT_HOTAUPDATE = 0x4b,
+ BR_POWERON_BY_USB_NO_BAT = 0x4d,
+ BR_NOGUI = 0x4e,
+ BR_FACTORY_VERSION = 0x4f,
+ BR_RESET_HAPPEN = 0x50,
+ BR_POWEROFF_ALARM = 0x51,
+ BR_POWEROFF_CHARGE = 0x52,
+ BR_POWERON_BY_SMPL = 0x53,
+ BR_CHECKPOINT_UPDATEDATAIMG = 0x54,
+ BR_REBOOT_CPU_BUCK = 0x55,
+ REBOOT_REASON_LABEL5 = 0x65,
+ AP_S_PMU = REBOOT_REASON_LABEL5,
+ AP_S_SMPL = 0x66,
+ AP_S_SCHARGER = 0x67,
+ REBOOT_REASON_LABEL6 = 0x6A,
+ XLOADER_S_DDRINIT_FAIL = REBOOT_REASON_LABEL6,
+ XLOADER_S_EMMCINIT_FAIL = 0x6B,
+ XLOADER_S_LOAD_FAIL = 0x6C,
+ XLOADER_S_VERIFY_FAIL = 0x6D,
+ XLOADER_S_WATCHDOG = 0x6E,
+ REBOOT_REASON_LABEL7 = 0x74,
+ FASTBOOT_S_EMMCINIT_FAIL = REBOOT_REASON_LABEL7,
+ FASTBOOT_S_PANIC = 0x75,
+ FASTBOOT_S_WATCHDOG = 0x76,
+ AP_S_PRESS6S = 0x77,
+ FASTBOOT_S_OCV_VOL_ERR = 0x78,
+ FASTBOOT_S_BAT_TEMP_ERR = 0x79,
+ FASTBOOT_S_MISC_ERR = 0x7A,
+ FASTBOOT_S_LOAD_DTIMG_ERR = 0x7B,
+ FASTBOOT_S_LOAD_OTHER_IMGS_ERR = 0x7C,
+ FASTBOOT_S_KERNEL_IMG_ERR = 0x7D,
+ FASTBOOT_S_LOADLPMCU_FAIL = 0x7E,
+ FASTBOOT_S_IMG_VERIFY_FAIL = 0x7F,
+ REBOOT_REASON_LABEL8 = 0x89,
+ REBOOT_REASON_LABEL9 = 0x90,
+ BFM_S_BOOT_NATIVE_BOOT_FAIL = REBOOT_REASON_LABEL9,
+ BFM_S_BOOT_TIMEOUT,
+ BFM_S_BOOT_FRAMEWORK_BOOT_FAIL,
+ BFM_S_BOOT_NATIVE_DATA_FAIL,
+ REBOOT_REASON_LABEL10 = 0xB0,
+} EXCH_SOURCE;
+enum MODID_LIST {
+ HISI_BB_MOD_MODEM_DRV_START = 0x00000000,
+ HISI_BB_MOD_MODEM_DRV_END = 0x0fffffff,
+ HISI_BB_MOD_MODEM_OSA_START = 0x10000000,
+ HISI_BB_MOD_MODEM_OSA_END = 0x1fffffff,
+ HISI_BB_MOD_MODEM_OM_START = 0x20000000,
+ HISI_BB_MOD_MODEM_OM_END = 0x2fffffff,
+ HISI_BB_MOD_MODEM_GU_L2_START = 0x30000000,
+ HISI_BB_MOD_MODEM_GU_L2_END = 0x3fffffff,
+ HISI_BB_MOD_MODEM_GU_WAS_START = 0x40000000,
+ HISI_BB_MOD_MODEM_GU_WAS_END = 0x4fffffff,
+ HISI_BB_MOD_MODEM_GU_GAS_START = 0x50000000,
+ HISI_BB_MOD_MODEM_GU_GAS_END = 0x5fffffff,
+ HISI_BB_MOD_MODEM_GU_NAS_START = 0x60000000,
+ HISI_BB_MOD_MODEM_GU_NAS_END = 0x6fffffff,
+ HISI_BB_MOD_MODEM_GU_DSP_START = 0x70000000,
+ HISI_BB_MOD_MODEM_GU_DSP_END = 0x7fffffff,
+ HISI_BB_MOD_AP_START = 0x80000000,
+ HISI_BB_MOD_AP_END = 0x81fff0ff,
+ HISI_BB_MOD_BFM_START = 0x81fff100,
+ HISI_BB_MOD_BFM_END = 0x81fff1ff,
+ HISI_BB_MOD_FASTBOOT_START = 0x81fff200,
+ HISI_BB_MOD_FASTBOOT_END = 0x81fffcff,
+ HISI_BB_MOD_ISP_START = 0x81fffd00,
+ HISI_BB_MOD_ISP_END = 0x81fffeff,
+ HISI_BB_MOD_EMMC_START = 0x81ffff00,
+ HISI_BB_MOD_EMMC_END = 0x81ffffff,
+ HISI_BB_MOD_CP_START = 0x82000000,
+ HISI_BB_MOD_CP_END = 0x82ffffff,
+ HISI_BB_MOD_TEE_START = 0x83000000,
+ HISI_BB_MOD_TEE_END = 0x83ffffff,
+ HISI_BB_MOD_HIFI_START = 0x84000000,
+ HISI_BB_MOD_HIFI_END = 0x84ffffff,
+ HISI_BB_MOD_LPM_START = 0x85000000,
+ HISI_BB_MOD_LPM_END = 0x85ffffff,
+ HISI_BB_MOD_IOM_START = 0x86000000,
+ HISI_BB_MOD_IOM_END = 0x86ffffff,
+ HISI_BB_MOD_HISEE_START = 0x87000000,
+ HISI_BB_MOD_HISEE_END = 0x87ffffff,
+ HISI_BB_MOD_RESERVED_START = 0x88000000,
+ HISI_BB_MOD_RESERVED_END = 0x9fffffff,
+ HISI_BB_MOD_MODEM_LPS_START = 0xa0000000,
+ HISI_BB_MOD_MODEM_LPS_END = 0xafffffff,
+ HISI_BB_MOD_MODEM_LMSP_START = 0xb0000000,
+ HISI_BB_MOD_MODEM_LMSP_END = 0xbfffffff,
+ HISI_BB_MOD_RANDOM_ALLOCATED_START = 0xc0000000,
+ HISI_BB_MOD_RANDOM_ALLOCATED_END = 0xf0ffffff
+};
+enum CORE_LIST {
+ RDR_AP = 0x1,
+ RDR_CP = 0x2,
+ RDR_TEEOS = 0x4,
+ RDR_HIFI = 0x8,
+ RDR_LPM3 = 0x10,
+ RDR_IOM3 = 0x20,
+ RDR_ISP = 0x40,
+ RDR_IVP = 0x80,
+ RDR_EMMC = 0x100,
+ RDR_MODEMAP = 0x200,
+ RDR_CLK = 0x400,
+ RDR_REGULATOR = 0x800,
+ RDR_BFM = 0x1000,
+ RDR_HISEE = 0x2000,
+ RDR_CORE_MAX = 14
+};
+#define FTRACE_MDUMP_MAGIC 0xF748FDE2
+#define FTRACE_BUF_MAX_SIZE 0x400000
+#define FTRACE_DUMP_NAME "ftrace"
+#define FTRACE_DUMP_FS_NAME "/proc/balong/memory/"FTRACE_DUMP_NAME
+typedef struct {
+ u64 magic;
+ u64 paddr;
+ u32 size;
+}
+FTRACE_MDUMP_HEAD;
+#define DTS_MNTNDUMP_NAME "/reserved-memory/mntndump"
+#define MNTNDUMP_MAGIC (0xDEADBEEFDEADBEEF)
+#define MAX_LEN_OF_MNTNDUMP_ADDR_STR (0x20)
+#define MNTN_DUMP_VERSION (0xFFFF0002)
+typedef enum {
+ MNTN_DUMP_HEAD,
+ MNTN_DUMP_ETR,
+ MNTN_DUMP_KERNEL_DUMP,
+ MNTN_DUMP_PANIC,
+ MNTN_DUMP_FTRACE,
+ MNTN_DUMP_PSTORE_RAMOOPS,
+ MNTN_DUMP_MAX
+} mntn_dump_module;
+#define MNTN_DUMP_HEAD_SIZE (sizeof(struct mdump_head))
+#define MNTN_DUMP_ETR_SIZE (0x30)
+#define MNTN_DUMP_KERNEL_DUMP_SIZE (0x300)
+#define MNTN_DUMP_PANIC_SIZE (0x100)
+#define MNTN_DUMP_FTRACE_SIZE (0x30)
+#define MNTN_DUMP_PSTORE_RAMOOPS_SIZE (0x30)
+#define MNTN_DUMP_MAXSIZE (0x1000)
+struct mdump_regs_info {
+ int mid;
+ unsigned int offset;
+ unsigned int size;
+} ;
+struct mdump_head {
+ unsigned long magic;
+ unsigned int version;
+ unsigned int nums;
+ struct mdump_regs_info regs_info[MNTN_DUMP_MAX];
+};
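+/*
+ * Illustrative check (not in the original header): assuming every MNTN_DUMP_*
+ * region is carved out of the same MNTN_DUMP_MAXSIZE area, a consumer of this
+ * layout could sanity-check it at build time with something like:
+ *
+ *   BUILD_BUG_ON(MNTN_DUMP_HEAD_SIZE + MNTN_DUMP_ETR_SIZE +
+ *                MNTN_DUMP_KERNEL_DUMP_SIZE + MNTN_DUMP_PANIC_SIZE +
+ *                MNTN_DUMP_FTRACE_SIZE + MNTN_DUMP_PSTORE_RAMOOPS_SIZE >
+ *                MNTN_DUMP_MAXSIZE);
+ */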
+struct mdump_end {
+ unsigned long magic;
+};
+struct mdump_pstore {
+ unsigned long magic;
+ unsigned long ramoops_addr;
+ unsigned long ramoops_size;
+};
+#endif
diff --git a/drivers/hisi/ap/platform/hi3660/soc_acpu_baseaddr_interface.h b/drivers/hisi/ap/platform/hi3660/soc_acpu_baseaddr_interface.h
new file mode 100644
index 000000000000..6e227ce23144
--- /dev/null
+++ b/drivers/hisi/ap/platform/hi3660/soc_acpu_baseaddr_interface.h
@@ -0,0 +1,324 @@
+#ifndef __SOC_ACPU_BASEADDR_INTERFACE_H__
+#define __SOC_ACPU_BASEADDR_INTERFACE_H__
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+#define SOC_ACPU_BOOTROM_OR_NANDC_BASE_ADDR (0xFFFF0000)
+#define SOC_ACPU_BOOTROM_BASE_ADDR (0xFFFE0000)
+#define SOC_ACPU_DMSS_CFG_BASE_ADDR (0xFFFC0000)
+#define SOC_ACPU_NANDC_CFG_BASE_ADDR (0xFFFA0000)
+#define SOC_ACPU_NANDC_BASE_ADDR (0xFFF80000)
+#define SOC_ACPU_LPMCU_RAM_BASE_ADDR (0xFFF60000)
+#define SOC_ACPU_LP_RAM_BASE_ADDR (0xFFF50000)
+#define SOC_ACPU_LP_CONFIG_BASE_ADDR (0xFFF3F000)
+#define SOC_ACPU_LP_TIMER_BASE_ADDR (0xFFF3E000)
+#define SOC_ACPU_LP_WDG_BASE_ADDR (0xFFF3D000)
+#define SOC_ACPU_PMU_SSI2_BASE_ADDR (0xFFF38000)
+#define SOC_ACPU_GNSPWM_BASE_ADDR (0xFFF37000)
+#define SOC_ACPU_PMU_SSI1_BASE_ADDR (0xFFF36000)
+#define SOC_ACPU_PERI_CRG_BASE_ADDR (0xFFF35000)
+#define SOC_ACPU_PMU_SSI0_BASE_ADDR (0xFFF34000)
+#define SOC_ACPU_PMU_I2C_BASE_ADDR (0xFFF33000)
+#define SOC_ACPU_UART6_BASE_ADDR (0xFFF32000)
+#define SOC_ACPU_PMCTRL_BASE_ADDR (0xFFF31000)
+#define SOC_ACPU_TSENSORC_BASE_ADDR (0xFFF30000)
+#define SOC_ACPU_GPIO1_SE_BASE_ADDR (0xFFF1E000)
+#define SOC_ACPU_GPIO28_BASE_ADDR (0xFFF1D000)
+#define SOC_ACPU_TIMER8_BASE_ADDR (0xFFF1C000)
+#define SOC_ACPU_TIMER7_BASE_ADDR (0xFFF1B000)
+#define SOC_ACPU_TIMER6_BASE_ADDR (0xFFF1A000)
+#define SOC_ACPU_TIMER5_BASE_ADDR (0xFFF19000)
+#define SOC_ACPU_TIMER4_BASE_ADDR (0xFFF18000)
+#define SOC_ACPU_TIMER3_BASE_ADDR (0xFFF17000)
+#define SOC_ACPU_TIMER2_BASE_ADDR (0xFFF16000)
+#define SOC_ACPU_TIMER1_BASE_ADDR (0xFFF15000)
+#define SOC_ACPU_TIMER0_BASE_ADDR (0xFFF14000)
+#define SOC_ACPU_BB_DRX_BASE_ADDR (0xFFF12000)
+#define SOC_ACPU_AO_IOC_BASE_ADDR (0xFFF11000)
+#define SOC_ACPU_GPIO27_BASE_ADDR (0xFFF10000)
+#define SOC_ACPU_GPIO26_BASE_ADDR (0xFFF0F000)
+#define SOC_ACPU_GPIO25_BASE_ADDR (0xFFF0E000)
+#define SOC_ACPU_GPIO24_BASE_ADDR (0xFFF0D000)
+#define SOC_ACPU_GPIO23_BASE_ADDR (0xFFF0C000)
+#define SOC_ACPU_GPIO22_BASE_ADDR (0xFFF0B000)
+#define SOC_ACPU_SCTRL_BASE_ADDR (0xFFF0A000)
+#define SOC_ACPU_SYS_CNT_BASE_ADDR (0xFFF08000)
+#define SOC_ACPU_SCI1_BASE_ADDR (0xFFF07000)
+#define SOC_ACPU_SCI0_BASE_ADDR (0xFFF06000)
+#define SOC_ACPU_RTC1_BASE_ADDR (0xFFF05000)
+#define SOC_ACPU_RTC0_BASE_ADDR (0xFFF04000)
+#define SOC_ACPU_EFUSEC_BASE_ADDR (0xFFF03000)
+#define SOC_ACPU_MMBUF_CFG_BASE_ADDR (0xFFF02000)
+#define SOC_ACPU_MMBUF_ASC1_BASE_ADDR (0xFFF01000)
+#define SOC_ACPU_MMBUF_ASC0_BASE_ADDR (0xFFF00000)
+#define SOC_ACPU_IOMCU_RTC_BASE_ADDR (0xFFD7F000)
+#define SOC_ACPU_IOMCU_CONFIG_BASE_ADDR (0xFFD7E000)
+#define SOC_ACPU_IOMCU_TIMER_BASE_ADDR (0xFFD7D000)
+#define SOC_ACPU_IOMCU_WDG_BASE_ADDR (0xFFD7C000)
+#define SOC_ACPU_IOMCU_GPIO3_BASE_ADDR (0xFFD7B000)
+#define SOC_ACPU_IOMCU_GPIO2_BASE_ADDR (0xFFD7A000)
+#define SOC_ACPU_IOMCU_GPIO1_BASE_ADDR (0xFFD79000)
+#define SOC_ACPU_IOMCU_GPIO0_BASE_ADDR (0xFFD78000)
+#define SOC_ACPU_IOMCU_DMAC_BASE_ADDR (0xFFD77000)
+#define SOC_ACPU_IOMCU_UART7_BASE_ADDR (0xFFD76000)
+#define SOC_ACPU_IOMCU_BLPWM_BASE_ADDR (0xFFD75000)
+#define SOC_ACPU_IOMCU_UART3_BASE_ADDR (0xFFD74000)
+#define SOC_ACPU_IOMCU_I2C2_BASE_ADDR (0xFFD73000)
+#define SOC_ACPU_IOMCU_I2C1_BASE_ADDR (0xFFD72000)
+#define SOC_ACPU_IOMCU_I2C0_BASE_ADDR (0xFFD71000)
+#define SOC_ACPU_IOMCU_SPI0_BASE_ADDR (0xFFD70000)
+#define SOC_ACPU_DTCM1Remap_BASE_ADDR (0xFFD6F000)
+#define SOC_ACPU_DTCM0Remap_BASE_ADDR (0xFFD6E000)
+#define SOC_ACPU_ITCMRemap_BASE_ADDR (0xFFD6C000)
+#define SOC_ACPU_RemapCtrl_BASE_ADDR (0xFFD6B000)
+#define SOC_ACPU_IOMCU_I2C3_BASE_ADDR (0xFFD6A000)
+#define SOC_ACPU_IOMCU_UART8_BASE_ADDR (0xFFD69000)
+#define SOC_ACPU_IOMCU_SPI2_BASE_ADDR (0xFFD68000)
+#define SOC_ACPU_IOMCU_DMMU_BASE_ADDR (0xFFD67000)
+#define SOC_ACPU_IOMCU_TIMER2_BASE_ADDR (0xFFD66000)
+#define SOC_ACPU_SDIO0_BASE_ADDR (0xFF3FF000)
+#define SOC_ACPU_PCIE_APB_CFG_BASE_ADDR (0xFF3FE000)
+#define SOC_ACPU_IOC_MMC1_BASE_ADDR (0xFF3FD000)
+#define SOC_ACPU_EMMC_BASE_ADDR (0xFF3FB000)
+#define SOC_ACPU_GPIO1_MMC1_BASE_ADDR (0xFF3E1000)
+#define SOC_ACPU_GPIO0_MMC1_BASE_ADDR (0xFF3E0000)
+#define SOC_ACPU_IOC_FIX_BASE_ADDR (0xFF3B6000)
+#define SOC_ACPU_GPIO19_BASE_ADDR (0xFF3B5000)
+#define SOC_ACPU_GPIO18_BASE_ADDR (0xFF3B4000)
+#define SOC_ACPU_SPI3_BASE_ADDR (0xFF3B3000)
+#define SOC_ACPU_UFS_SYS_CTRL_BASE_ADDR (0xFF3B1000)
+#define SOC_ACPU_UFS_CFG_BASE_ADDR (0xFF3B0000)
+#define SOC_ACPU_SD3_BASE_ADDR (0xFF37F000)
+#define SOC_ACPU_IOC_MMC0_BASE_ADDR (0xFF37E000)
+#define SOC_ACPU_USB3OTG_BC_BASE_ADDR (0xFF200000)
+#define SOC_ACPU_USB3OTG_BASE_ADDR (0xFF100000)
+#define SOC_ACPU_IPF_PSAM_BASE_ADDR (0xFF040000)
+#define SOC_ACPU_IPF_BASE_ADDR (0xFF031000)
+#define SOC_ACPU_SOCP_BASE_ADDR (0xFF030000)
+#define SOC_ACPU_IPC_MDM_NS_BASE_ADDR (0xFF011000)
+#define SOC_ACPU_IPC_MDM_S_BASE_ADDR (0xFF010000)
+#define SOC_ACPU_CS_STM_BASE_ADDR (0xFE000000)
+#define SOC_ACPU_PERI_DMAC_BASE_ADDR (0xFDF30000)
+#define SOC_ACPU_PERF_STAT_BASE_ADDR (0xFDF10000)
+#define SOC_ACPU_SECENG_S_BASE_ADDR (0xFDF0F000)
+#define SOC_ACPU_SECENG_P_BASE_ADDR (0xFDF0E000)
+#define SOC_ACPU_I2C4_BASE_ADDR (0xFDF0D000)
+#define SOC_ACPU_I2C3_BASE_ADDR (0xFDF0C000)
+#define SOC_ACPU_I2C7_BASE_ADDR (0xFDF0B000)
+#define SOC_ACPU_SPI1_BASE_ADDR (0xFDF08000)
+#define SOC_ACPU_SPI4_BASE_ADDR (0xFDF06000)
+#define SOC_ACPU_UART5_BASE_ADDR (0xFDF05000)
+#define SOC_ACPU_UART2_BASE_ADDR (0xFDF03000)
+#define SOC_ACPU_UART0_BASE_ADDR (0xFDF02000)
+#define SOC_ACPU_UART4_BASE_ADDR (0xFDF01000)
+#define SOC_ACPU_UART1_BASE_ADDR (0xFDF00000)
+#define SOC_ACPU_PCIECtrl_BASE_ADDR (0xF4000000)
+#define SOC_ACPU_PCIEPHY_BASE_ADDR (0xF3F00000)
+#define SOC_ACPU_OCBC_MBOX_3_BASE_ADDR (0xF110E000)
+#define SOC_ACPU_OCBC_MBOX_2_BASE_ADDR (0xF110D000)
+#define SOC_ACPU_OCBC_MBOX_1_BASE_ADDR (0xF110C000)
+#define SOC_ACPU_OCBC_MBOX_0_BASE_ADDR (0xF110B000)
+#define SOC_ACPU_OCBC_TIMER_BASE_ADDR (0xF110A000)
+#define SOC_ACPU_OCBC_WDOG_BASE_ADDR (0xF1109000)
+#define SOC_ACPU_OCBC_SCTRL_BASE_ADDR (0xF1108000)
+#define SOC_ACPU_OCBC_TCM_BASE_ADDR (0xF1100000)
+#define SOC_ACPU_HISEE_SCE_BASE_ADDR (0xF0E45000)
+#define SOC_ACPU_HISEE_KEY_MANAGER_BASE_ADDR (0xF0E44000)
+#define SOC_ACPU_HISEE_PKE_BASE_ADDR (0xF0E40000)
+#define SOC_ACPU_HISEE_IPC_BASE_ADDR (0xF0E30000)
+#define SOC_ACPU_HISEE_MAILBOX_BASE_ADDR (0xF0E20000)
+#define SOC_ACPU_HISEE_TRNG_BASE_ADDR (0xF0E12000)
+#define SOC_ACPU_HISEE_MEDRAM_BASE_ADDR (0xF0E11000)
+#define SOC_ACPU_HISEE_MEDROM_BASE_ADDR (0xF0E10000)
+#define SOC_ACPU_HISEE_TZPC_BASE_ADDR (0xF0E0F000)
+#define SOC_ACPU_HISEE_TIMER2_BASE_ADDR (0xF0E0E000)
+#define SOC_ACPU_HISEE_TIMER1_BASE_ADDR (0xF0E0D000)
+#define SOC_ACPU_HISEE_TIMER0_BASE_ADDR (0xF0E0C000)
+#define SOC_ACPU_HISEE_PUF_BASE_ADDR (0xF0E0B000)
+#define SOC_ACPU_HISEE_ACTIVE_SHIELD_BASE_ADDR (0xF0E0A000)
+#define SOC_ACPU_HISEE_SWP_BASE_ADDR (0xF0E09000)
+#define SOC_ACPU_HISEE_DMAC_BASE_ADDR (0xF0E08000)
+#define SOC_ACPU_HISEE_UART1_BASE_ADDR (0xF0E07000)
+#define SOC_ACPU_HISEE_UART0_BASE_ADDR (0xF0E06000)
+#define SOC_ACPU_HISEE_OTPC_BASE_ADDR (0xF0E05000)
+#define SOC_ACPU_HISEE_SENSOR_CTRL_BASE_ADDR (0xF0E04000)
+#define SOC_ACPU_HISEE_CONFIG_BASE_ADDR (0xF0E03000)
+#define SOC_ACPU_HISEE_WDOG_BASE_ADDR (0xF0E02000)
+#define SOC_ACPU_HISEE_DIGITAL_SENSOR_BASE_ADDR (0xF0E00000)
+#define SOC_ACPU_IOMCU_TCM_BASE_ADDR (0xF0000000)
+#define SOC_ACPU_CSSYS_APB_BASE_ADDR (0xEC000000)
+#define SOC_ACPU_DMCPACK3_BASE_ADDR (0xEA960000)
+#define SOC_ACPU_DMCPACK2_BASE_ADDR (0xEA940000)
+#define SOC_ACPU_DMCPACK1_BASE_ADDR (0xEA920000)
+#define SOC_ACPU_DMCPACK0_BASE_ADDR (0xEA900000)
+#define SOC_ACPU_MMBUF_BASE_ADDR (0xEA800000)
+#define SOC_ACPU_HKMEM_BASE_ADDR (0xEA000000)
+#define SOC_ACPU_MMC0_NOC_Service_Target_BASE_ADDR (0xE9890000)
+#define SOC_ACPU_MMC1_NOC_Service_Target_BASE_ADDR (0xE9880000)
+#define SOC_ACPU_AOBUS_Service_Target_BASE_ADDR (0xE9870000)
+#define SOC_ACPU_DMA_NOC_Service_Target_BASE_ADDR (0xE9860000)
+#define SOC_ACPU_IVP32_Sevice_Target_BASE_ADDR (0xE9850000)
+#define SOC_ACPU_SYS_BUS_Service_Target_BASE_ADDR (0xE9840000)
+#define SOC_ACPU_ASP_Service_Target_BASE_ADDR (0xE9830000)
+#define SOC_ACPU_Modem_Service_Target_BASE_ADDR (0xE9820000)
+#define SOC_ACPU_UFSBUS_Service_Target_BASE_ADDR (0xE9810000)
+#define SOC_ACPU_CFGBUS_Service_Target_BASE_ADDR (0xE9800000)
+#define SOC_ACPU_IVP32_SMMU_BASE_ADDR (0xE8DC0000)
+#define SOC_ACPU_IVP32_VP_CFG_BASE_ADDR (0xE8D84000)
+#define SOC_ACPU_IVP32_TIMER1_BASE_ADDR (0xE8D83000)
+#define SOC_ACPU_IVP32_TIMER0_BASE_ADDR (0xE8D82000)
+#define SOC_ACPU_IVP32_WDG_BASE_ADDR (0xE8D81000)
+#define SOC_ACPU_IVP32_CFG_BASE_ADDR (0xE8D80000)
+#define SOC_ACPU_IVP32_IRAM_BASE_ADDR (0xE8D00000)
+#define SOC_ACPU_IVP32_DRAM1_BASE_ADDR (0xE8C80000)
+#define SOC_ACPU_IVP32_DRAM0_BASE_ADDR (0xE8C00000)
+#define SOC_ACPU_ATGC_BASE_ADDR (0xE8A22000)
+#define SOC_ACPU_TZPC_BASE_ADDR (0xE8A21000)
+#define SOC_ACPU_GPIO21_BASE_ADDR (0xE8A20000)
+#define SOC_ACPU_GPIO20_BASE_ADDR (0xE8A1F000)
+#define SOC_ACPU_GPIO17_BASE_ADDR (0xE8A1C000)
+#define SOC_ACPU_GPIO16_BASE_ADDR (0xE8A1B000)
+#define SOC_ACPU_GPIO15_BASE_ADDR (0xE8A1A000)
+#define SOC_ACPU_GPIO14_BASE_ADDR (0xE8A19000)
+#define SOC_ACPU_GPIO13_BASE_ADDR (0xE8A18000)
+#define SOC_ACPU_GPIO12_BASE_ADDR (0xE8A17000)
+#define SOC_ACPU_GPIO11_BASE_ADDR (0xE8A16000)
+#define SOC_ACPU_GPIO10_BASE_ADDR (0xE8A15000)
+#define SOC_ACPU_GPIO9_BASE_ADDR (0xE8A14000)
+#define SOC_ACPU_GPIO8_BASE_ADDR (0xE8A13000)
+#define SOC_ACPU_GPIO7_BASE_ADDR (0xE8A12000)
+#define SOC_ACPU_GPIO6_BASE_ADDR (0xE8A11000)
+#define SOC_ACPU_GPIO5_BASE_ADDR (0xE8A10000)
+#define SOC_ACPU_GPIO4_BASE_ADDR (0xE8A0F000)
+#define SOC_ACPU_GPIO3_BASE_ADDR (0xE8A0E000)
+#define SOC_ACPU_GPIO2_BASE_ADDR (0xE8A0D000)
+#define SOC_ACPU_GPIO1_BASE_ADDR (0xE8A0C000)
+#define SOC_ACPU_GPIO0_BASE_ADDR (0xE8A0B000)
+#define SOC_ACPU_GPIO0_SE_BASE_ADDR (0xE8A0A000)
+#define SOC_ACPU_PCTRL_BASE_ADDR (0xE8A09000)
+#define SOC_ACPU_LoadMonitor_BASE_ADDR (0xE8A08000)
+#define SOC_ACPU_WD1_BASE_ADDR (0xE8A07000)
+#define SOC_ACPU_WD0_BASE_ADDR (0xE8A06000)
+#define SOC_ACPU_CTF_BASE_ADDR (0xE8A05000)
+#define SOC_ACPU_PWM_BASE_ADDR (0xE8A04000)
+#define SOC_ACPU_TIMER12_BASE_ADDR (0xE8A03000)
+#define SOC_ACPU_TIMER11_BASE_ADDR (0xE8A02000)
+#define SOC_ACPU_TIMER10_BASE_ADDR (0xE8A01000)
+#define SOC_ACPU_TIMER9_BASE_ADDR (0xE8A00000)
+#define SOC_ACPU_IOC_BASE_ADDR (0xE896C000)
+#define SOC_ACPU_IPC_NS_BASE_ADDR (0xE896B000)
+#define SOC_ACPU_IPC_BASE_ADDR (0xE896A000)
+#define SOC_ACPU_NOC_VENC_Service_Target_BASE_ADDR (0xE8940000)
+#define SOC_ACPU_NOC_VDEC_Service_Target_BASE_ADDR (0xE8930000)
+#define SOC_ACPU_NOC_VCODECBUS_Service_Target_BASE_ADDR (0xE8920000)
+#define SOC_ACPU_VENC_BASE_ADDR (0xE8900000)
+#define SOC_ACPU_VDEC_BASE_ADDR (0xE8800000)
+#define SOC_ACPU_NOC_ISP_Service_Target_BASE_ADDR (0xE86D0000)
+#define SOC_ACPU_NOC_DSS_Service_Target_BASE_ADDR (0xE86C0000)
+#define SOC_ACPU_LDI1_BASE_ADDR (0xE867E000)
+#define SOC_ACPU_DSC_BASE_ADDR (0xE867DC00)
+#define SOC_ACPU_IFBC_BASE_ADDR (0xE867D800)
+#define SOC_ACPU_LDI0_BASE_ADDR (0xE867D000)
+#define SOC_ACPU_DPP_SBL_BASE_ADDR (0xE867C000)
+#define SOC_ACPU_DPP_BASE_ADDR (0xE8670000)
+#define SOC_ACPU_HI_ACE_BASE_ADDR (0xE866F000)
+#define SOC_ACPU_DBUF1_BASE_ADDR (0xE866E000)
+#define SOC_ACPU_DBUF0_BASE_ADDR (0xE866D000)
+#define SOC_ACPU_OV3_BASE_ADDR (0xE8660C00)
+#define SOC_ACPU_OV2_BASE_ADDR (0xE8660800)
+#define SOC_ACPU_OV1_BASE_ADDR (0xE8660400)
+#define SOC_ACPU_OV0_BASE_ADDR (0xE8660000)
+#define SOC_ACPU_WCH2_BASE_ADDR (0xE865E000)
+#define SOC_ACPU_WCH1_BASE_ADDR (0xE865C000)
+#define SOC_ACPU_WCH0_BASE_ADDR (0xE865A000)
+#define SOC_ACPU_RCH_D1_BASE_ADDR (0xE8653000)
+#define SOC_ACPU_RCH_D0_BASE_ADDR (0xE8652000)
+#define SOC_ACPU_RCH_D3_BASE_ADDR (0xE8651000)
+#define SOC_ACPU_RCH_D2_BASE_ADDR (0xE8650000)
+#define SOC_ACPU_RCH_G1_BASE_ADDR (0xE8640000)
+#define SOC_ACPU_RCH_G0_BASE_ADDR (0xE8638000)
+#define SOC_ACPU_RCH_V2_BASE_ADDR (0xE8630000)
+#define SOC_ACPU_RCH_V1_BASE_ADDR (0xE8628000)
+#define SOC_ACPU_RCH_V0_BASE_ADDR (0xE8620000)
+#define SOC_ACPU_GLB_BASE_ADDR (0xE8612000)
+#define SOC_ACPU_DBG_BASE_ADDR (0xE8611000)
+#define SOC_ACPU_MCTL_MUTEX_BASE_ADDR (0xE8610800)
+#define SOC_ACPU_MCTL_SYS_BASE_ADDR (0xE8610000)
+#define SOC_ACPU_VBIF0_MIF_BASE_ADDR (0xE860A000)
+#define SOC_ACPU_VBIF1_AIF_BASE_ADDR (0xE8609000)
+#define SOC_ACPU_VBIF0_SMMU_BASE_ADDR (0xE8608000)
+#define SOC_ACPU_VBIF0_AIF_BASE_ADDR (0xE8607000)
+#define SOC_ACPU_CMD_BASE_ADDR (0xE8602000)
+#define SOC_ACPU_DSI1_BASE_ADDR (0xE8601400)
+#define SOC_ACPU_DSI0_BASE_ADDR (0xE8601000)
+#define SOC_ACPU_ISP_SUB_CTRL_BASE_ADDR (0xE8583000)
+#define SOC_ACPU_ISP_IPC_BASE_ADDR (0xE8582000)
+#define SOC_ACPU_ISP_TIMER_BASE_ADDR (0xE8581000)
+#define SOC_ACPU_ISP_WDT_BASE_ADDR (0xE8580000)
+#define SOC_ACPU_ISP_Core_CFG_BASE_ADDR (0xE8400000)
+#define SOC_ACPU_G3D_BASE_ADDR (0xE82C0000)
+#define SOC_ACPU_CODEC_SSI_BASE_ADDR (0xE82B9000)
+#define SOC_ACPU_HKADC_SSI_BASE_ADDR (0xE82B8000)
+#define SOC_ACPU_GIC400_BASE_ADDR (0xE82B0000)
+#define SOC_ACPU_CCI_CFG_BASE_ADDR (0xE8100000)
+#define SOC_ACPU_DSP_ITCM_BASE_ADDR (0xE8080000)
+#define SOC_ACPU_DSP_DTCM_BASE_ADDR (0xE8058000)
+#define SOC_ACPU_SLIMBUS_BASE_ADDR (0xE8050000)
+#define SOC_ACPU_DSD_BASE_ADDR (0xE804FC00)
+#define SOC_ACPU_SIO_BT_BASE_ADDR (0xE804F800)
+#define SOC_ACPU_SIO_AUDIO_BASE_ADDR (0xE804F000)
+#define SOC_ACPU_ASP_HDMI_SPDIF_BASE_ADDR (0xE804EC00)
+#define SOC_ACPU_ASP_HDMI_ASP_BASE_ADDR (0xE804E400)
+#define SOC_ACPU_ASP_CFG_BASE_ADDR (0xE804E000)
+#define SOC_ACPU_ASP_WD_BASE_ADDR (0xE804D000)
+#define SOC_ACPU_ASP_IPC_BASE_ADDR (0xE804C000)
+#define SOC_ACPU_ASP_DMAC_BASE_ADDR (0xE804B000)
+#define SOC_ACPU_ASP_TIMER1_BASE_ADDR (0xE804A000)
+#define SOC_ACPU_ASP_TIMER0_BASE_ADDR (0xE8049000)
+#define SOC_ACPU_ASP_GPIO_BASE_ADDR (0xE8048000)
+#define SOC_ACPU_ASP_DMMU_BASE_ADDR (0xE8047000)
+#define SOC_ACPU_SECRAM_BASE_ADDR (0xE8000000)
+#define SOC_ACPU_CBBE16_IMEM_BASE_ADDR (0xE2980000)
+#define SOC_ACPU_CBBE16_DMEM_BASE_ADDR (0xE2900000)
+#define SOC_ACPU_TLBBE16_IMEM_BASE_ADDR (0xE2800000)
+#define SOC_ACPU_TLBBE16_DMEM_BASE_ADDR (0xE2700000)
+#define SOC_ACPU_BBP_IRM_BASE_ADDR (0xE2000000)
+#define SOC_ACPU_GUTL_BBP_BASE_ADDR (0xE1000000)
+#define SOC_ACPU_AXIMEM_BASE_ADDR (0xE0800000)
+#define SOC_ACPU_AMON_MDM_BASE_ADDR (0xE0458000)
+#define SOC_ACPU_VIC_CBBE16_BASE_ADDR (0xE0457000)
+#define SOC_ACPU_VIC_TLBBE16_BASE_ADDR (0xE0456000)
+#define SOC_ACPU_UPACC_BASE_ADDR (0xE0453000)
+#define SOC_ACPU_CIPHER_BASE_ADDR (0xE0452000)
+#define SOC_ACPU_CICOM1_BASE_ADDR (0xE0443000)
+#define SOC_ACPU_HDLC_BASE_ADDR (0xE0442000)
+#define SOC_ACPU_IPCM_BASE_ADDR (0xE0441000)
+#define SOC_ACPU_CICOM0_BASE_ADDR (0xE0440000)
+#define SOC_ACPU_SYSCNT_BASE_ADDR (0xE0220000)
+#define SOC_ACPU_WDT1_MDM_BASE_ADDR (0xE0211000)
+#define SOC_ACPU_EDMA1_MDM_BASE_ADDR (0xE0210000)
+#define SOC_ACPU_TIMER9_MDM_BASE_ADDR (0xE020F000)
+#define SOC_ACPU_TIMER8_MDM_BASE_ADDR (0xE020E000)
+#define SOC_ACPU_TIMER7_MDM_BASE_ADDR (0xE020D000)
+#define SOC_ACPU_TIMER6_MDM_BASE_ADDR (0xE020C000)
+#define SOC_ACPU_TIMER5_MDM_BASE_ADDR (0xE020B000)
+#define SOC_ACPU_TIMER4_MDM_BASE_ADDR (0xE020A000)
+#define SOC_ACPU_TIMER3_MDM_BASE_ADDR (0xE0209000)
+#define SOC_ACPU_TIMER2_MDM_BASE_ADDR (0xE0208000)
+#define SOC_ACPU_TIMER1_MDM_BASE_ADDR (0xE0207000)
+#define SOC_ACPU_TIMER0_MDM_BASE_ADDR (0xE0206000)
+#define SOC_ACPU_UART1_MDM_BASE_ADDR (0xE0205000)
+#define SOC_ACPU_EDMA_MDM_BASE_ADDR (0xE0204000)
+#define SOC_ACPU_UART_MDM_BASE_ADDR (0xE0203000)
+#define SOC_ACPU_WDT_MDM_BASE_ADDR (0xE0201000)
+#define SOC_ACPU_SYSCTRL_MDM_BASE_ADDR (0xE0200000)
+#define SOC_ACPU_CCPU_PRIV_BASE_ADDR (0xE0100000)
+#define SOC_ACPU_CCPU_L2C_BASE_ADDR (0xE0000000)
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif
diff --git a/drivers/hisi/hifi_dsp/Kconfig b/drivers/hisi/hifi_dsp/Kconfig
new file mode 100644
index 000000000000..987224a785f9
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/Kconfig
@@ -0,0 +1,8 @@
+menu "Hifimisc driver serve HIFI dsp"
+
+config HIFI_DSP_ONE_TRACK
+ bool "hifi low power play device support"
+ help
+ hifi lpp device provide service for HIFI and AP, in case of HIFI ultra-low power audio play.
+endmenu
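+
+# Illustrative note (not part of the original Kconfig): since this file is
+# sourced under "if HISILICON_PLATFORM", a defconfig fragment that selects the
+# driver would contain:
+#   CONFIG_HISILICON_PLATFORM=y
+#   CONFIG_HIFI_DSP_ONE_TRACK=y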
+
diff --git a/drivers/hisi/hifi_dsp/Makefile b/drivers/hisi/hifi_dsp/Makefile
new file mode 100644
index 000000000000..13fc910e9c49
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/Makefile
@@ -0,0 +1,35 @@
+
+ifeq ($(CLT_IV),true)
+EXTRA_CFLAGS += -DCLT_VOICE
+endif
+
+ifeq ($(CFG_CONFIG_HISI_FAMA),true)
+LOCAL_CFLAGS += -DCONFIG_HISI_FAMA
+endif
+
+ifeq ($(CONFIG_HIFI_IPC_3660),y)
+# TODO/FIXME: hi3650 (PLATFORM_FPGA should be removed for the ASIC build)
+EXTRA_CFLAGS += -DPLATFORM_FPGA
+EXTRA_CFLAGS += -DHIFI_TCM_208K
+EXTRA_CFLAGS += -Iinclude/modem/include/config/hi365x
+EXTRA_CFLAGS += -Iinclude/modem/include/drv/hi365x
+endif
+ifeq ($(CONFIG_HIFI_IPC_6250),y)
+EXTRA_CFLAGS += -DPLATFORM_HI6250
+# TODO/FIXME: hi6250 (PLATFORM_FPGA should be removed for the ASIC build)
+EXTRA_CFLAGS += -DPLATFORM_FPGA
+endif
+
+EXTRA_CFLAGS += -DMULTI_MIC
+EXTRA_CFLAGS += -I$(srctree)/drivers/hisi/hifi_mailbox/mailbox
+EXTRA_CFLAGS += -I$(srctree)/drivers/hisi/hifi_mailbox/ipcm
+EXTRA_CFLAGS += -I$(srctree)/drivers/hisi/ap/platform/hi3660
+
+ifneq ($(TARGET_BUILD_VARIANT),user)
+EXTRA_CFLAGS += -DENABLE_HIFI_DEBUG
+endif
+
+obj-$(CONFIG_HIFI_DSP_ONE_TRACK) += hifi_lpp.o
+obj-$(CONFIG_HIFI_DSP_ONE_TRACK) += hifi_om.o
+obj-$(CONFIG_HIFI_DSP_ONE_TRACK) += memcpy_opt.o
+
diff --git a/drivers/hisi/hifi_dsp/audio_hifi.h b/drivers/hisi/hifi_dsp/audio_hifi.h
new file mode 100644
index 000000000000..f785714d9053
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/audio_hifi.h
@@ -0,0 +1,263 @@
+/*
+ * HiFi message definitions.
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _AUDIO_HIFI_H
+#define _AUDIO_HIFI_H
+
+#include <linux/types.h>
+
+typedef enum {
+ HIFI_CHN_SYNC_CMD = 0,
+ HIFI_CHN_READNOTICE_CMD,
+ HIFI_CHN_INVAILD_CMD
+} HIFI_CHN_CMD_TYPE;
+
+typedef struct HIFI_CHN_CMD_STRUCT {
+ HIFI_CHN_CMD_TYPE cmd_type;
+ unsigned int sn;
+} HIFI_CHN_CMD;
+
+struct misc_io_async_param {
+ unsigned int para_in_l;
+ unsigned int para_in_h;
+ unsigned int para_size_in;
+};
+
+/* misc_io_sync_cmd */
+struct misc_io_sync_param {
+ unsigned int para_in_l;
+ unsigned int para_in_h;
+ unsigned int para_size_in;
+
+ unsigned int para_out_l;
+ unsigned int para_out_h;
+ unsigned int para_size_out;
+};
+
+/* misc_io_senddata_cmd */
+struct misc_io_senddata_async_param {
+ unsigned int para_in_l;
+ unsigned int para_in_h;
+ unsigned int para_size_in;
+
+ unsigned int data_src_l;
+ unsigned int data_src_h;
+ unsigned int data_src_size;
+};
+
+struct misc_io_senddata_sync_param {
+ unsigned int para_in_l;
+ unsigned int para_in_h;
+ unsigned int para_size_in;
+
+ unsigned int src_l;
+ unsigned int src_h;
+ unsigned int src_size;
+
+ unsigned int dst_l;
+ unsigned int dst_h;
+ unsigned int dst_size;
+
+ unsigned int para_out_l;
+ unsigned int para_out_h;
+ unsigned int para_size_out;
+};
+
+struct misc_io_get_phys_param {
+ unsigned int flag;
+ unsigned int phys_addr_l;
+ unsigned int phys_addr_h;
+};
+
+struct misc_io_dump_buf_param {
+ unsigned int user_buf_l;
+ unsigned int user_buf_h;
+ unsigned int clear; /* clear the current log buffer */
+ unsigned int buf_size;
+};
+
+/*
+ * voice proxy interface
+ */
+/*****************************************************************************
+ 2 macro define
+*****************************************************************************/
+#define PROXY_VOICE_CODEC_MAX_DATA_LEN (32) /* 16 bit */
+#define PROXY_VOICE_RTP_MAX_DATA_LEN (256) /* 16 bit */
+/*****************************************************************************
+ 5 msg define
+*****************************************************************************/
+
+/* the MsgID define between PROXY and Voice */
+enum voice_proxy_voice_msg_id {
+ ID_VOICE_PROXY_RCTP_OM_INFO_NTF = 0xDDEC,
+ ID_PROXY_VOICE_RCTP_OM_INFO_CNF = 0xDDED,
+ ID_VOICE_PROXY_AJB_OM_INFO_NTF = 0xDDEE,
+ ID_PROXY_VOICE_AJB_OM_INFO_CNF = 0xDDEF,
+ ID_PROXY_VOICE_LTE_RX_NTF = 0xDDF0,
+ ID_VOICE_PROXY_LTE_RX_CNF = 0xDDF1,
+ ID_VOICE_PROXY_LTE_RX_NTF = 0xDDF2,
+ ID_PROXY_VOICE_LTE_RX_CNF = 0xDDF3,
+ ID_VOICE_PROXY_LTE_TX_NTF = 0xDDF4,
+ ID_PROXY_VOICE_LTE_TX_CNF = 0xDDF5,
+ ID_PROXY_VOICE_LTE_TX_NTF = 0xDDF6,
+ ID_VOICE_PROXY_LTE_TX_CNF = 0xDDF7,
+
+ ID_PROXY_VOICE_WIFI_RX_NTF = 0xDDF8,
+ ID_VOICE_PROXY_WIFI_RX_CNF = 0xDDF9,
+ ID_VOICE_PROXY_WIFI_TX_NTF = 0xDDFA,
+ ID_PROXY_VOICE_WIFI_TX_CNF = 0xDDFB,
+ ID_PROXY_VOICE_STATUS_IND = 0xDDFC,
+ ID_PROXY_VOICE_ENCRYPT_KEY_BEGIN = 0xDDFD,
+ ID_PROXY_VOICE_ENCRYPT_KEY_END = 0xDDFE,
+ ID_PROXY_VOICE_DATA_MSGID_BUT
+};
+
+/*
+ * 6 STRUCT define
+ */
+
+/*
+ * Description: Rx request structure between PROXY and hifi_voice over LTE
+ */
+struct voice_proxy_lte_rx_notify {
+ uint16_t msg_id;
+ uint16_t sn;
+ uint32_t ts;
+ uint16_t codec_type;
+ uint16_t dtx_enable;
+ uint16_t rate_mode;
+ uint16_t error_flag;
+ uint16_t frame_type;
+ uint16_t quality_idx;
+ uint16_t data[PROXY_VOICE_CODEC_MAX_DATA_LEN];
+ uint32_t ssrc;
+ uint32_t reserved;
+};
+
+/*
+ * Description: Rx request structure between PROXY and hifi_voice over Wi-Fi;
+ * its layout is identical to IMSA_VOICE_RTP_RX_DATA_IND_STRU
+ */
+struct voice_proxy_wifi_rx_notify {
+ uint16_t msg_id;
+ uint16_t reserved;
+ uint32_t channel_id;
+ uint32_t port_type; /* Port Type: 0 RTP; 1 RTCP */
+ uint16_t data_len;
+ uint16_t data_seq;
+ uint8_t frag_seq;
+ uint8_t frag_max;
+ uint16_t reserved2;
+ uint8_t data[PROXY_VOICE_RTP_MAX_DATA_LEN];
+};
+
+/*
+ * Description: confirm structure between PROXY and hifi_voice
+ */
+struct voice_proxy_confirm {
+ uint16_t msg_id;
+ uint16_t reserved;
+ uint32_t result;
+};
+
+/*
+ * Description: Tx request structure between Voice-Proxy and hifi_voice over LTE
+ */
+struct voice_proxy_lte_tx_notify {
+ uint16_t msg_id;
+ uint16_t reserved;
+ uint16_t amr_type;
+ uint16_t frame_type;
+ uint16_t data[PROXY_VOICE_CODEC_MAX_DATA_LEN];
+};
+
+/*
+ * Description: Tx request structure between Voice-Proxy and hifi_voice over Wi-Fi
+ */
+struct voice_proxy_wifi_tx_notify {
+ uint16_t msg_id;
+ uint16_t reserved;
+ uint32_t channel_id;
+ uint32_t port_type;
+ uint16_t data_len;
+ uint16_t data_seq;
+ uint8_t frag_seq;
+ uint8_t frag_max;
+ uint16_t reserved2;
+ uint8_t data[PROXY_VOICE_RTP_MAX_DATA_LEN];
+};
+
+struct voice_proxy_status {
+ uint16_t msg_id;
+ uint16_t reserved;
+ uint32_t status;
+ uint32_t socket_cfg;
+};
+
+struct voice_proxy_voice_encrypt_key_end {
+ uint16_t msg_id;
+ uint16_t reserved;
+ bool encrypt_negotiation_result;
+ bool reserved2[3];
+};
+struct misc_io_pcm_buf_param {
+ uint64_t buf;
+ uint32_t buf_size;
+};
+
+
+/* XAF STRUCTURES */
+
+/* ...command/response message */
+typedef struct xf_proxy_msg {
+ /* ...session ID */
+ uint32_t id;
+
+ /* ...proxy API command/response code */
+ uint32_t opcode;
+
+ /* ...length of attached buffer */
+ uint32_t length;
+
+ /* ...shared logical address of message buffer */
+ uint64_t address;
+
+} __attribute__((__packed__)) xf_proxy_msg_t;
+
+/*
+ * end
+ */
+
+#define HIFI_MISC_IOCTL_ASYNCMSG _IOWR('A', 0x70, struct misc_io_async_param)
+#define HIFI_MISC_IOCTL_SYNCMSG _IOW('A', 0x71, struct misc_io_sync_param)
+#define HIFI_MISC_IOCTL_SENDDATA_SYNC _IOW('A', 0x72, struct misc_io_senddata_sync_param)
+#define HIFI_MISC_IOCTL_GET_PHYS _IOWR('A', 0x73, struct misc_io_get_phys_param)
+#define HIFI_MISC_IOCTL_TEST _IOWR('A', 0x74, struct misc_io_senddata_sync_param)
+#define HIFI_MISC_IOCTL_WRITE_PARAMS _IOWR('A', 0x75, struct misc_io_sync_param)
+#define HIFI_MISC_IOCTL_DUMP_HIFI _IOWR('A', 0x76, struct misc_io_dump_buf_param)
+#define HIFI_MISC_IOCTL_DUMP_CODEC _IOWR('A', 0x77, struct misc_io_dump_buf_param)
+#define HIFI_MISC_IOCTL_WAKEUP_THREAD _IOW('A', 0x78, unsigned int)
+#define HIFI_MISC_IOCTL_DISPLAY_MSG _IOWR('A', 0x79, struct misc_io_dump_buf_param)
+#define HIFI_MISC_IOCTL_WAKEUP_PCM_READ_THREAD _IOW('A', 0x7a, unsigned int)
+#define HIFI_MISC_IOCTL_PCM_GAIN _IOW('A', 0x7b, struct misc_io_pcm_buf_param)
+#define HIFI_MISC_IOCTL_XAF_IPC_MSG_SEND _IOW('A', 0x7c, xf_proxy_msg_t)
+#define HIFI_MISC_IOCTL_XAF_IPC_MSG_RECV _IOR('A', 0x7d, xf_proxy_msg_t)
+
+#ifdef CLT_VOICE
+#define CLT_HIFI_MISC_IOCTL_SEND_VOICE _IOWR('A', 0x90, struct misc_io_async_param)
+#endif
+
+#define HIFI_MISC_IOCTL_GET_VOICE_BSD_PARAM _IOWR('A', 0x7c, unsigned int)
+#define INT_TO_ADDR(low, high) (void *) (unsigned long)((unsigned long long)(low) | ((unsigned long long)(high)<<32))
+#define GET_LOW32(x) (unsigned int)(((unsigned long long)(unsigned long)(x))&0xffffffffULL)
+#define GET_HIG32(x) (unsigned int)((((unsigned long long)(unsigned long)(x))>>32)&0xffffffffULL)
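+
+/*
+ * Illustrative note (not part of the original header): the *_l/*_h members of
+ * the misc_io_* structs above carry the two 32-bit halves of a 64-bit buffer
+ * address. GET_LOW32()/GET_HIG32() produce the two halves from a pointer and
+ * INT_TO_ADDR() rebuilds the pointer, roughly:
+ *
+ *   param.para_in_l = GET_LOW32(buf);   // "buf" is a hypothetical buffer
+ *   param.para_in_h = GET_HIG32(buf);
+ *   ...
+ *   void *addr = INT_TO_ADDR(param.para_in_l, param.para_in_h);
+ *
+ * hifi_dsp_async_cmd() and hifi_dsp_sync_cmd() in hifi_lpp.c use INT_TO_ADDR()
+ * this way on the parameters passed from user space.
+ */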
+
+#endif /* _AUDIO_HIFI_H */
diff --git a/drivers/hisi/hifi_dsp/hifi_lpp.c b/drivers/hisi/hifi_dsp/hifi_lpp.c
new file mode 100644
index 000000000000..7ca25a876964
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/hifi_lpp.c
@@ -0,0 +1,1699 @@
+/*
+ * hifi misc driver.
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc. All rights reserved worldwide.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+
+#include <asm/memory.h>
+#include <asm/types.h>
+#include <asm/io.h>
+
+#include <linux/compat.h>
+
+#include "hifi_lpp.h"
+#include "audio_hifi.h"
+#include "drv_mailbox_msg.h"
+#include "bsp_drv_ipc.h"
+#include "hifi_om.h"
+#include <dsm/dsm_pub.h>
+
+#define DTS_COMP_HIFIDSP_NAME "hisilicon,k3hifidsp"
+#define FILE_PROC_DIRECTORY "hifidsp"
+#define SEND_MSG_TO_HIFI mailbox_send_msg
+#define RETRY_COUNT 3
+
+static DEFINE_SEMAPHORE(s_misc_sem);
+
+LIST_HEAD(recv_sync_work_queue_head);
+LIST_HEAD(recv_proc_work_queue_head);
+
+/* 3-mic support: work items used to reset the hi6402 audio dp clock */
+struct multi_mic {
+ struct workqueue_struct *reset_audio_dp_clk_wq;
+ struct work_struct reset_audio_dp_clk_work;
+ unsigned int audio_clk_state;
+ struct list_head cmd_queue;
+ spinlock_t cmd_lock; /* protects cmd queue */
+};
+
+struct misc_msg_info {
+ unsigned short msg_id;
+ const char *const info;
+};
+
+struct hifi_misc_priv {
+ spinlock_t recv_sync_lock;
+ spinlock_t recv_proc_lock;
+ spinlock_t pcm_read_lock;
+
+ struct completion completion;
+ wait_queue_head_t proc_waitq;
+ wait_queue_head_t pcm_read_waitq;
+ wait_queue_head_t xaf_waitq;
+
+ int wait_flag;
+ int pcm_read_wait_flag;
+ unsigned int sn;
+
+ struct wakeup_source hifi_misc_wakesrc;
+ struct wakeup_source update_buff_wakesrc;
+
+ unsigned char *hifi_priv_base_virt;
+ unsigned char *hifi_priv_base_phy;
+
+ struct device *dev;
+
+ struct multi_mic multi_mic_ctrl;
+
+};
+static struct hifi_misc_priv s_misc_data;
+extern bool hasData;
+static struct notifier_block s_hifi_sr_nb;
+static struct notifier_block s_hifi_reboot_nb;
+static atomic_t volatile s_hifi_in_suspend = ATOMIC_INIT(0);
+static atomic_t volatile s_hifi_in_saving = ATOMIC_INIT(0);
+
+static struct misc_msg_info msg_info[] = {
+ {ID_AP_AUDIO_SET_DTS_ENABLE_CMD, "ID_AP_AUDIO_SET_DTS_ENABLE_CMD"},
+ {ID_AP_AUDIO_SET_DTS_DEV_CMD, "ID_AP_AUDIO_SET_DTS_DEV_CMD"},
+ {ID_AP_AUDIO_SET_DTS_GEQ_CMD, "ID_AP_AUDIO_SET_DTS_GEQ_CMD"},
+ {ID_AP_AUDIO_SET_DTS_GEQ_ENABLE_CMD,
+ "ID_AP_AUDIO_SET_DTS_GEQ_ENABLE_CMD"},
+ {ID_AP_HIFI_VOICE_RECORD_START_CMD,
+ "ID_AP_HIFI_VOICE_RECORD_START_CMD"},
+ {ID_AP_HIFI_VOICE_RECORD_STOP_CMD, "ID_AP_HIFI_VOICE_RECORD_STOP_CMD"},
+ {ID_AP_VOICEPP_START_REQ, "ID_AP_VOICEPP_START_REQ"},
+ {ID_VOICEPP_AP_START_CNF, "ID_VOICEPP_AP_START_CNF"},
+ {ID_AP_VOICEPP_STOP_REQ, "ID_AP_VOICEPP_STOP_REQ"},
+ {ID_VOICEPP_AP_STOP_CNF, "ID_VOICEPP_AP_STOP_CNF"},
+ {ID_AP_AUDIO_PLAY_START_REQ, "ID_AP_AUDIO_PLAY_START_REQ"},
+ {ID_AUDIO_AP_PLAY_START_CNF, "ID_AUDIO_AP_PLAY_START_CNF"},
+ {ID_AP_AUDIO_PLAY_PAUSE_REQ, "ID_AP_AUDIO_PLAY_PAUSE_REQ"},
+ {ID_AUDIO_AP_PLAY_PAUSE_CNF, "ID_AUDIO_AP_PLAY_PAUSE_CNF"},
+ {ID_AUDIO_AP_PLAY_DONE_IND, "ID_AUDIO_AP_PLAY_DONE_IND"},
+ {ID_AP_AUDIO_PLAY_UPDATE_BUF_CMD, "ID_AP_AUDIO_PLAY_UPDATE_BUF_CMD"},
+ {ID_AP_AUDIO_PLAY_QUERY_TIME_REQ, "ID_AP_AUDIO_PLAY_QUERY_TIME_REQ"},
+ {ID_AP_AUDIO_PLAY_WAKEUPTHREAD_REQ,
+ "ID_AP_AUDIO_PLAY_WAKEUPTHREAD_REQ"},
+ {ID_AUDIO_AP_PLAY_QUERY_TIME_CNF, "ID_AUDIO_AP_PLAY_QUERY_TIME_CNF"},
+ {ID_AP_AUDIO_PLAY_QUERY_STATUS_REQ,
+ "ID_AP_AUDIO_PLAY_QUERY_STATUS_REQ"},
+ {ID_AUDIO_AP_PLAY_QUERY_STATUS_CNF,
+ "ID_AUDIO_AP_PLAY_QUERY_STATUS_CNF"},
+ {ID_AP_AUDIO_PLAY_SEEK_REQ, "ID_AP_AUDIO_PLAY_SEEK_REQ"},
+ {ID_AUDIO_AP_PLAY_SEEK_CNF, "ID_AUDIO_AP_PLAY_SEEK_CNF"},
+ {ID_AP_AUDIO_PLAY_SET_VOL_CMD, "ID_AP_AUDIO_PLAY_SET_VOL_CMD"},
+ {ID_AP_AUDIO_RECORD_PCM_HOOK_CMD, "ID_AP_AUDIO_RECORD_PCM_HOOK_CMD"},
+ {ID_AP_HIFI_ENHANCE_START_REQ, "ID_AP_HIFI_ENHANCE_START_REQ"},
+ {ID_HIFI_AP_ENHANCE_START_CNF, "ID_HIFI_AP_ENHANCE_START_CNF"},
+ {ID_AP_HIFI_ENHANCE_STOP_REQ, "ID_AP_HIFI_ENHANCE_STOP_REQ"},
+ {ID_HIFI_AP_ENHANCE_STOP_CNF, "ID_HIFI_AP_ENHANCE_STOP_CNF"},
+ {ID_AP_HIFI_ENHANCE_SET_DEVICE_REQ,
+ "ID_AP_HIFI_ENHANCE_SET_DEVICE_REQ"},
+ {ID_HIFI_AP_ENHANCE_SET_DEVICE_CNF,
+ "ID_HIFI_AP_ENHANCE_SET_DEVICE_CNF"},
+ {ID_AP_AUDIO_ENHANCE_SET_DEVICE_IND,
+ "ID_AP_AUDIO_ENHANCE_SET_DEVICE_IND"},
+ {ID_AP_AUDIO_MLIB_SET_PARA_IND, "ID_AP_AUDIO_MLIB_SET_PARA_IND"},
+ {ID_AP_AUDIO_CMD_SET_SOURCE_CMD, "ID_AP_AUDIO_CMD_SET_SOURCE_CMD"},
+ {ID_AP_AUDIO_CMD_SET_DEVICE_CMD, "ID_AP_AUDIO_CMD_SET_DEVICE_CMD"},
+ {ID_AP_AUDIO_CMD_SET_MODE_CMD, "ID_AP_AUDIO_CMD_SET_MODE_CMD"},
+ {ID_AP_AUDIO_CMD_SET_ANGLE_CMD, "ID_AP_AUDIO_CMD_SET_ANGLE_CMD"},
+ {ID_AP_AUDIO_ROUTING_COMPLETE_REQ, "ID_AP_AUDIO_ROUTING_COMPLETE_REQ"},
+ {ID_AUDIO_AP_OM_DUMP_CMD, "ID_AUDIO_AP_OM_DUMP_CMD"},
+ {ID_AUDIO_AP_FADE_OUT_REQ, "ID_AUDIO_AP_FADE_OUT_REQ"},
+ {ID_AP_ENABLE_MODEM_LOOP_REQ, "ID_AP_ENABLE_MODEM_LOOP_REQ"},
+ {ID_AP_AUDIO_DYN_EFFECT_GET_PARAM, "ID_AP_AUDIO_DYN_EFFECT_GET_PARAM"},
+ {ID_AP_AUDIO_DYN_EFFECT_GET_PARAM_CNF,
+ "ID_AP_AUDIO_DYN_EFFECT_GET_PARAM_CNF"},
+ {ID_AP_AUDIO_DYN_EFFECT_TRIGGER, "ID_AP_AUDIO_DYN_EFFECT_TRIGGER"},
+};
+
+void sochifi_watchdog_send_event(void)
+{
+ int retval = 0;
+ char *envp[2] = { "hifi_watchdog", NULL };
+
+ can_reset_system();
+
+ logi("soc hifi watchdog coming, now reset mediaserver \n");
+ retval = kobject_uevent_env(&s_misc_data.dev->kobj, KOBJ_CHANGE, envp);
+ if (retval) {
+ loge("send uevent failed, retval: %d\n", retval);
+ }
+
+ return;
+}
+
+static void hifi_misc_msg_info(unsigned short msg_id)
+{
+ int i;
+ int size;
+
+ size = sizeof(msg_info) / sizeof(msg_info[0]);
+
+ for (i = 0; i < size; i++) {
+ if (msg_info[i].msg_id == msg_id) {
+ logi("MSG: %s.\n", msg_info[i].info);
+ break;
+ }
+ }
+
+ if (i == size) {
+ logw("MSG: Not defined msg id: 0x%x.\n", msg_id);
+ }
+
+ return;
+}
+
+static int hifi_misc_async_write(unsigned char *arg, unsigned int len)
+{
+ int ret = OK;
+
+ IN_FUNCTION;
+
+ if (NULL == arg) {
+ loge("input arg is NULL.\n");
+ ret = ERROR;
+ goto END;
+ }
+
+ ret = SEND_MSG_TO_HIFI(MAILBOX_MAILCODE_ACPU_TO_HIFI_MISC, arg, len);
+ if (OK != ret) {
+ loge("msg send to hifi fail,ret is %d.\n", ret);
+ ret = ERROR;
+ goto END;
+ }
+
+ END:
+ OUT_FUNCTION;
+ return ret;
+}
+
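+/*
+ * Send a message to the HiFi DSP over the mailbox and block (up to 10 s) on
+ * the completion that hifi_misc_handle_mail() signals when the matching
+ * synchronous reply arrives. The sequence number is advanced on every call.
+ */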
+static int hifi_misc_sync_write(unsigned char *buff, unsigned int len)
+{
+ int ret = OK;
+
+ IN_FUNCTION;
+
+ if (NULL == buff) {
+ loge("input arg is NULL.\n");
+ ret = ERROR;
+ goto END;
+ }
+
+ reinit_completion(&s_misc_data.completion);
+
+ ret = SEND_MSG_TO_HIFI(MAILBOX_MAILCODE_ACPU_TO_HIFI_MISC, buff, len);
+ if (OK != ret) {
+ loge("msg send to hifi fail,ret is %d.\n", ret);
+ ret = ERROR;
+ goto END;
+ }
+
+ ret = wait_for_completion_timeout(&s_misc_data.completion, msecs_to_jiffies(10000));
+
+ s_misc_data.sn++;
+ if (unlikely(s_misc_data.sn & 0x10000000)) {
+ s_misc_data.sn = 0;
+ }
+
+ if (!ret) {
+ loge("wait completion timeout.\n");
+ hifi_dump_panic_log();
+ ret = ERROR;
+ goto END;
+ } else {
+ ret = OK;
+ }
+
+ END:
+ OUT_FUNCTION;
+ return ret;
+}
+
+static int hifi_misc_send_hifi_msg_async(struct common_hifi_cmd *cmd)
+{
+ int ret = OK;
+ logi("send msg: 0x%x to hifi !\n", cmd->msg_id);
+ ret =
+ (unsigned int)mailbox_send_msg(MAILBOX_MAILCODE_ACPU_TO_HIFI_MISC,
+ cmd, sizeof(struct common_hifi_cmd));
+ if (OK != ret) {
+ loge("msg: 0x%x send to hifi fail, ret is %d.\n", cmd->msg_id,
+ ret);
+ }
+
+ return ret;
+}
+
+static bool hifi_misc_local_process(unsigned short _msg_id)
+{
+ bool ret = false;
+ HIFI_MSG_ID msg_id = (HIFI_MSG_ID) _msg_id;
+
+ switch (msg_id) {
+ case ID_AUDIO_AP_OM_DUMP_CMD:
+ case ID_AUDIO_AP_FADE_OUT_REQ:
+ case ID_AUDIO_AP_DP_CLK_EN_IND:
+ case ID_AUDIO_AP_VOICE_BSD_PARAM_CMD:
+ case ID_AUDIO_AP_OM_CMD:
+ ret = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void hifi_misc_mesg_process(void *cmd)
+{
+ unsigned int cmd_id = 0;
+ struct common_hifi_cmd *common_cmd = NULL;
+ struct dp_clk_request *dp_clk_cmd = NULL;
+ struct hifi_om_ap_data *hifi_om_rev_data = NULL;
+
+ BUG_ON(NULL == cmd);
+
+ hifi_om_rev_data = (struct hifi_om_ap_data *)cmd;
+ common_cmd = (struct common_hifi_cmd *)cmd;
+ cmd_id = *(unsigned short *)cmd;
+
+ switch (cmd_id) {
+ case ID_AUDIO_AP_OM_CMD:
+ hifi_om_rev_data_handle(HIFI_OM_WORK_AUDIO_OM_DETECTION,
+ hifi_om_rev_data->data,
+ hifi_om_rev_data->data_len);
+ break;
+ case ID_AUDIO_AP_VOICE_BSD_PARAM_CMD:
+ hifi_om_rev_data_handle(HIFI_OM_WORK_VOICE_BSD,
+ hifi_om_rev_data->data,
+ hifi_om_rev_data->data_len);
+ break;
+ case ID_AUDIO_AP_OM_DUMP_CMD:
+ logi("hifi notify to dump hifi log, hifi errtype: %d.\n",
+ common_cmd->value);
+ break;
+ case ID_AUDIO_AP_DP_CLK_EN_IND:
+ case ID_AUDIO_AP_FADE_OUT_REQ:
+ dp_clk_cmd =
+ (struct dp_clk_request *)
+ kmalloc(sizeof(struct dp_clk_request), GFP_ATOMIC);
+ if (!dp_clk_cmd) {
+ loge("malloc fail\n");
+ break;
+ }
+ memset(dp_clk_cmd, 0, sizeof(struct dp_clk_request));
+
+ logi("multi mic cmd: 0x%x.\n", common_cmd->msg_id);
+ memcpy(&(dp_clk_cmd->dp_clk_msg), common_cmd,
+ sizeof(struct common_hifi_cmd));
+
+ spin_lock_bh(&(s_misc_data.multi_mic_ctrl.cmd_lock));
+ list_add_tail(&dp_clk_cmd->dp_clk_node,
+ &(s_misc_data.multi_mic_ctrl.cmd_queue));
+ spin_unlock_bh(&(s_misc_data.multi_mic_ctrl.cmd_lock));
+ __pm_wakeup_event(&s_misc_data.hifi_misc_wakesrc, 500);
+ if (queue_work(s_misc_data.multi_mic_ctrl.reset_audio_dp_clk_wq,
+ &s_misc_data.multi_mic_ctrl.
+ reset_audio_dp_clk_work))
+ logw("cmd 0x%x no trigger queue work\n",
+ common_cmd->msg_id);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
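+/*
+ * Handle a mail received from the HiFi DSP: the payload is copied into a
+ * freshly allocated recv_request and then dispatched. A synchronous reply
+ * whose sequence number matches the pending request is queued on
+ * recv_sync_work_queue_head and completes the waiter in hifi_misc_sync_write();
+ * asynchronous notifications wake the proc/pcm-read waiters or are handled
+ * locally via hifi_misc_mesg_process().
+ */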
+static void hifi_misc_handle_mail(void *usr_para, void *mail_handle,
+ unsigned int mail_len)
+{
+ unsigned int ret_mail = 0;
+ struct recv_request *recv = NULL;
+ HIFI_CHN_CMD *cmd_para = NULL;
+ void *recmsg = NULL;
+
+ IN_FUNCTION;
+
+ if (NULL == mail_handle) {
+ loge("mail_handle is NULL.\n");
+ goto END;
+ }
+
+ if (mail_len >= MAIL_LEN_MAX || mail_len <= SIZE_CMD_ID) {
+ loge("mail_len is invalid: %u(>= 512 or <= 8)\n", mail_len);
+ goto END;
+ }
+
+ recv =
+ (struct recv_request *)kmalloc(sizeof(struct recv_request),
+ GFP_ATOMIC);
+ if (NULL == recv) {
+ loge("recv kmalloc failed.\n");
+ goto ERR;
+ }
+ memset(recv, 0, sizeof(struct recv_request));
+
+ recv->rev_msg.mail_buff_len = mail_len;
+ recv->rev_msg.mail_buff =
+ (unsigned char *)kmalloc(mail_len, GFP_ATOMIC);
+ if (NULL == recv->rev_msg.mail_buff) {
+ loge("recv->rev_msg.mail_buff kmalloc failed.\n");
+ goto ERR;
+ }
+ memset(recv->rev_msg.mail_buff, 0, mail_len);
+
+ ret_mail =
+ mailbox_read_msg_data(mail_handle,
+ (char *)(recv->rev_msg.mail_buff),
+ (unsigned int
+ *)(&(recv->rev_msg.mail_buff_len)));
+
+ if ((ret_mail != MAILBOX_OK) || (recv->rev_msg.mail_buff_len <= 0)) {
+ loge("Empty point or data length error! ret=0x%x, mail_size: %d.\n", (unsigned int)ret_mail, recv->rev_msg.mail_buff_len);
+ goto ERR;
+ }
+
+ logd("ret_mail=%d, mail_buff_len=%d, msgID=0x%x.\n", ret_mail,
+ recv->rev_msg.mail_buff_len,
+ *((unsigned int *)(recv->rev_msg.mail_buff + mail_len -
+ SIZE_CMD_ID)));
+
+ cmd_para =
+ (HIFI_CHN_CMD *) (recv->rev_msg.mail_buff + mail_len - SIZE_CMD_ID);
+ recmsg = (void *)recv->rev_msg.mail_buff;
+ if (HIFI_CHN_SYNC_CMD == cmd_para->cmd_type) {
+ if (s_misc_data.sn == cmd_para->sn) {
+ spin_lock_bh(&s_misc_data.recv_sync_lock);
+ list_add_tail(&recv->recv_node,
+ &recv_sync_work_queue_head);
+ spin_unlock_bh(&s_misc_data.recv_sync_lock);
+ complete(&s_misc_data.completion);
+ goto END;
+ } else {
+ loge("s_misc_data.sn !== cmd_para->sn: %d, %d.\n",
+ s_misc_data.sn, cmd_para->sn);
+ goto ERR;
+ }
+ } else if ((HIFI_CHN_READNOTICE_CMD == cmd_para->cmd_type)
+ && (ACPU_TO_HIFI_ASYNC_CMD == cmd_para->sn)) {
+ if (ID_AUDIO_AP_PLAY_DONE_IND == *((unsigned short *)recmsg)) {
+ /* only msg ID_AUDIO_AP_PLAY_DONE_IND holds the wakelock for 5 s */
+ __pm_wakeup_event(&s_misc_data.update_buff_wakesrc, 5000);
+
+ spin_lock_bh(&s_misc_data.recv_proc_lock);
+ list_add_tail(&recv->recv_node,
+ &recv_proc_work_queue_head);
+ s_misc_data.wait_flag++;
+ spin_unlock_bh(&s_misc_data.recv_proc_lock);
+ wake_up(&s_misc_data.proc_waitq);
+ goto END;
+ }
+
+ if (ID_AUDIO_AP_UPDATE_PCM_BUFF_CMD ==
+ *((unsigned short *)recmsg)) {
+ spin_lock_bh(&s_misc_data.pcm_read_lock);
+ s_misc_data.pcm_read_wait_flag = true;
+ spin_unlock_bh(&s_misc_data.pcm_read_lock);
+ wake_up(&s_misc_data.pcm_read_waitq);
+ }
+
+ if (hifi_misc_local_process(*(unsigned short *)recmsg)) {
+ hifi_misc_mesg_process(recmsg);
+ }
+ } else {
+ loge("unknown msg comed from hifi .\n");
+ }
+
+ ERR:
+ if (recv) {
+ if (recv->rev_msg.mail_buff) {
+ kfree(recv->rev_msg.mail_buff);
+ }
+ kfree(recv);
+ }
+
+ END:
+ OUT_FUNCTION;
+
+ return;
+}
+
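+/*
+ * Copy a user-space parameter blob into a freshly allocated kernel buffer,
+ * reserving SIZE_CMD_ID extra bytes at the tail for the HIFI_CHN_CMD header
+ * that hifi_dsp_async_cmd()/hifi_dsp_sync_cmd() fill in before sending.
+ */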
+static int hifi_dsp_get_input_param(unsigned int usr_para_size,
+ void *usr_para_addr,
+ unsigned int *krn_para_size,
+ void **krn_para_addr)
+{
+ void *para_in = NULL;
+ unsigned int para_size_in = 0;
+
+ IN_FUNCTION;
+
+ para_size_in = usr_para_size + SIZE_CMD_ID;
+
+ if ((para_size_in > SIZE_LIMIT_PARAM) || (para_size_in <= SIZE_CMD_ID)) {
+ loge("para_size_in(%u) exceed LIMIT(%u/%u).\n", para_size_in,
+ SIZE_CMD_ID, SIZE_LIMIT_PARAM);
+ goto ERR;
+ }
+
+ para_in = kzalloc(para_size_in, GFP_KERNEL);
+ if (NULL == para_in) {
+ loge("kzalloc fail.\n");
+ goto ERR;
+ }
+
+ if (NULL != usr_para_addr) {
+ if (copy_from_user(para_in, usr_para_addr, usr_para_size)) {
+ loge("copy_from_user fail.\n");
+ goto ERR;
+ }
+ } else {
+ loge("usr_para_addr is null no user data.\n");
+ goto ERR;
+ }
+
+ *krn_para_size = para_size_in;
+ *krn_para_addr = para_in;
+
+ hifi_misc_msg_info(*(unsigned short *)para_in);
+
+ OUT_FUNCTION;
+ return OK;
+
+ ERR:
+ if (para_in != NULL) {
+ kfree(para_in);
+ para_in = NULL;
+ }
+
+ OUT_FUNCTION;
+ return ERROR;
+}
+
+static void hifi_dsp_get_input_param_free(void **krn_para_addr)
+{
+ IN_FUNCTION;
+
+ if (*krn_para_addr != NULL) {
+ kfree(*krn_para_addr);
+ *krn_para_addr = NULL;
+ } else {
+ loge("krn_para_addr to free is NULL.\n");
+ }
+
+ OUT_FUNCTION;
+ return;
+}
+
+static int hifi_dsp_get_output_param(unsigned int krn_para_size,
+ void *krn_para_addr,
+ unsigned int *usr_para_size,
+ void __user *usr_para_addr)
+{
+ int ret = OK;
+ void __user *para_to = NULL;
+ unsigned int para_n = 0;
+
+ IN_FUNCTION;
+
+ if (NULL == krn_para_addr) {
+ loge("krn_para_addr is NULL.\n");
+ ret = -EINVAL;
+ goto END;
+ }
+
+ if ((NULL == usr_para_addr) || (NULL == usr_para_size)) {
+ loge("usr_size_p=0x%pK, usr_addr=0x%pK.\n", usr_para_size,
+ usr_para_addr);
+ ret = -EINVAL;
+ goto END;
+ }
+
+ para_to = usr_para_addr;
+ para_n = krn_para_size;
+ if (para_n > SIZE_LIMIT_PARAM) {
+ loge("para_n exceed limit (%d / %d).\n", para_n,
+ SIZE_LIMIT_PARAM);
+ ret = -EINVAL;
+ goto END;
+ }
+
+ if (para_n > *usr_para_size) {
+ loge("para_n exceed usr_size(%d / %d).\n", para_n,
+ *usr_para_size);
+ ret = -EINVAL;
+ goto END;
+ }
+
+ /* copy data from kernel space to user space: (to, from, n) */
+ ret = copy_to_user(para_to, krn_para_addr, para_n);
+ if (OK != ret) {
+ loge("copy_to_user fail, ret is %d.\n", ret);
+ ret = ERROR;
+ goto END;
+ }
+
+ *usr_para_size = para_n;
+ hifi_misc_msg_info(*(unsigned short *)krn_para_addr);
+
+ END:
+ OUT_FUNCTION;
+ return ret;
+}
+
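+/*
+ * Fire-and-forget command path: the parameter block is sent to the DSP with
+ * sn = ACPU_TO_HIFI_ASYNC_CMD and no reply is waited for. For
+ * ID_AP_AUDIO_PLAY_UPDATE_BUF_CMD the update_buff wakesource taken in
+ * hifi_misc_handle_mail() is released here.
+ */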
+static int hifi_dsp_async_cmd(unsigned long arg)
+{
+ int ret = OK;
+ struct misc_io_async_param param;
+ void *para_krn_in = NULL;
+ unsigned int para_krn_size_in = 0;
+ HIFI_CHN_CMD *cmd_para = NULL;
+ void *para_addr_in = NULL;
+
+ IN_FUNCTION;
+
+ if (copy_from_user
+ (&param, (void *)arg, sizeof(struct misc_io_async_param))) {
+ loge("copy_from_user fail.\n");
+ ret = ERROR;
+ goto END;
+ }
+
+ para_addr_in = INT_TO_ADDR(param.para_in_l, param.para_in_h);
+ ret = hifi_dsp_get_input_param(param.para_size_in, para_addr_in,
+ &para_krn_size_in, &para_krn_in);
+ if (OK != ret) {
+ loge("get ret=%d.\n", ret);
+ goto END;
+ }
+ /* add cmd id and sn */
+ cmd_para =
+ (HIFI_CHN_CMD *) (para_krn_in + para_krn_size_in - SIZE_CMD_ID);
+ cmd_para->cmd_type = HIFI_CHN_SYNC_CMD;
+ cmd_para->sn = ACPU_TO_HIFI_ASYNC_CMD;
+
+ ret = hifi_misc_async_write(para_krn_in, para_krn_size_in);
+ if (OK != ret) {
+ loge("async_write ret=%d.\n", ret);
+ goto END;
+ }
+
+ if (ID_AP_AUDIO_PLAY_UPDATE_BUF_CMD == *(unsigned short *)para_krn_in) {
+ __pm_relax(&s_misc_data.update_buff_wakesrc);
+ }
+
+ END:
+ hifi_dsp_get_input_param_free(&para_krn_in);
+ OUT_FUNCTION;
+ return ret;
+}
+
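+/*
+ * Synchronous command path: the request is stamped with the current sequence
+ * number (s_misc_data.sn) and written via hifi_misc_sync_write(), which is
+ * expected to block until hifi_misc_handle_mail() queues the matching reply
+ * and signals s_misc_data.completion. The reply is then popped from
+ * recv_sync_work_queue_head and copied back to user space.
+ */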
+static int hifi_dsp_sync_cmd(unsigned long arg)
+{
+ int ret = OK;
+ struct misc_io_sync_param param;
+ void *para_krn_in = NULL;
+ unsigned int para_krn_size_in = 0;
+ HIFI_CHN_CMD *cmd_para = NULL;
+ void __user *para_addr_in = NULL;
+ void __user *para_addr_out = NULL;
+ struct recv_request *recv = NULL;
+
+ IN_FUNCTION;
+
+ if (copy_from_user
+ (&param, (void *)arg, sizeof(struct misc_io_sync_param))) {
+ loge("copy_from_user fail.\n");
+ ret = ERROR;
+ goto END;
+ }
+ logd("para_size_in=%d.\n", param.para_size_in);
+
+ para_addr_in = INT_TO_ADDR(param.para_in_l, param.para_in_h);
+ para_addr_out = INT_TO_ADDR(param.para_out_l, param.para_out_h);
+ ret = hifi_dsp_get_input_param(param.para_size_in, para_addr_in,
+ &para_krn_size_in, &para_krn_in);
+ if (OK != ret) {
+ loge("hifi_dsp_get_input_param fail: ret=%d.\n", ret);
+ goto END;
+ }
+
+ /* add cmd id and sn */
+ cmd_para =
+ (HIFI_CHN_CMD *) (para_krn_in + para_krn_size_in - SIZE_CMD_ID);
+ cmd_para->cmd_type = HIFI_CHN_SYNC_CMD;
+
+ cmd_para->sn = s_misc_data.sn;
+
+ ret = hifi_misc_sync_write(para_krn_in, para_krn_size_in);
+ if (OK != ret) {
+ loge("hifi_misc_sync_write ret=%d.\n", ret);
+ goto END;
+ }
+
+ spin_lock_bh(&s_misc_data.recv_sync_lock);
+
+ if (!list_empty(&recv_sync_work_queue_head)) {
+ recv =
+ list_entry(recv_sync_work_queue_head.next,
+ struct recv_request, recv_node);
+ ret =
+ hifi_dsp_get_output_param(recv->rev_msg.mail_buff_len -
+ SIZE_CMD_ID,
+ recv->rev_msg.mail_buff,
+ &param.para_size_out,
+ para_addr_out);
+ if (OK != ret) {
+ loge("get_out ret=%d.\n", ret);
+ }
+
+ list_del(&recv->recv_node);
+ kfree(recv->rev_msg.mail_buff);
+ kfree(recv);
+ recv = NULL;
+ }
+ spin_unlock_bh(&s_misc_data.recv_sync_lock);
+
+ if (copy_to_user
+ ((void *)arg, &param, sizeof(struct misc_io_sync_param))) {
+ loge("copy_to_user fail.\n");
+ ret = COPYFAIL;
+ goto END;
+ }
+
+ END:
+ hifi_dsp_get_input_param_free(&para_krn_in);
+
+ OUT_FUNCTION;
+ return ret;
+}
+
+static int hifi_dsp_get_phys_cmd(unsigned long arg)
+{
+ int ret = OK;
+ struct misc_io_get_phys_param param;
+ unsigned long para_addr_in = 0;
+
+ IN_FUNCTION;
+
+ if (copy_from_user
+ (&param, (void *)arg, sizeof(struct misc_io_get_phys_param))) {
+ loge("copy_from_user fail.\n");
+ OUT_FUNCTION;
+ return ERROR;
+ }
+
+ switch (param.flag) {
+ case 0:
+ para_addr_in =
+ (unsigned long)(s_misc_data.hifi_priv_base_phy -
+ HIFI_UNSEC_BASE_ADDR);
+ param.phys_addr_l = GET_LOW32(para_addr_in);
+ param.phys_addr_h = GET_HIG32(para_addr_in);
+ logd("para_addr_in = %ld.\n", para_addr_in);
+ break;
+
+ default:
+ ret = ERROR;
+ loge("invalid flag=%d.\n", param.flag);
+ break;
+ }
+
+ if (copy_to_user
+ ((void *)arg, &param, sizeof(struct misc_io_get_phys_param))) {
+ loge("copy_to_user fail.\n");
+ ret = ERROR;
+ }
+
+ OUT_FUNCTION;
+ return ret;
+}
+
+static int hifi_dsp_senddata_sync_cmd(unsigned long arg)
+{
+ loge("this cmd is not supported by now .\n");
+ return ERROR;
+}
+
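+/*
+ * Build a local ID_AUDIO_AP_PLAY_DONE_IND message carrying 'arg' as the play
+ * status and queue it on recv_proc_work_queue_head, so that a reader blocked
+ * in hifi_misc_proc_read() wakes up even without a mail from the DSP.
+ */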
+static int hifi_dsp_wakeup_read_thread(unsigned long arg)
+{
+ struct recv_request *recv = NULL;
+ struct misc_recmsg_param *recmsg = NULL;
+ struct list_head *pos = NULL;
+ unsigned int node_count = 0;
+ unsigned int wake_cmd = (unsigned int)arg;
+
+ list_for_each(pos, &recv_proc_work_queue_head) {
+ node_count++;
+ }
+
+ if (node_count > MAX_NODE_COUNT) {
+ loge("too much work left in proc_work_queue, node count:%u\n",
+ node_count);
+ return -EBUSY;
+ }
+
+ recv =
+ (struct recv_request *)kmalloc(sizeof(struct recv_request),
+ GFP_ATOMIC);
+ if (NULL == recv) {
+ loge("recv kmalloc failed.\n");
+ return -ENOMEM;
+ }
+ memset(recv, 0, sizeof(struct recv_request));
+
+ __pm_wakeup_event(&s_misc_data.hifi_misc_wakesrc, 1000);
+
+ recv->rev_msg.mail_buff_len =
+ sizeof(struct misc_recmsg_param) + SIZE_CMD_ID;
+
+ recv->rev_msg.mail_buff =
+ (unsigned char *)kmalloc(recv->rev_msg.mail_buff_len, GFP_ATOMIC);
+ if (NULL == recv->rev_msg.mail_buff) {
+ kfree(recv);
+ loge("recv->rev_msg.mail_buff kmalloc failed.\n");
+ return -ENOMEM;
+ }
+ memset(recv->rev_msg.mail_buff, 0, recv->rev_msg.mail_buff_len);
+
+ recmsg = (struct misc_recmsg_param *)recv->rev_msg.mail_buff;
+ recmsg->msgID = ID_AUDIO_AP_PLAY_DONE_IND;
+ recmsg->playStatus = (unsigned short)wake_cmd;
+
+ spin_lock_bh(&s_misc_data.recv_proc_lock);
+ list_add_tail(&recv->recv_node, &recv_proc_work_queue_head);
+ s_misc_data.wait_flag++;
+ spin_unlock_bh(&s_misc_data.recv_proc_lock);
+
+ wake_up(&s_misc_data.proc_waitq);
+
+ return OK;
+}
+
+static int hifi_dsp_wakeup_pcm_read_thread(unsigned long arg)
+{
+ (void)arg;
+
+ spin_lock_bh(&s_misc_data.pcm_read_lock);
+ s_misc_data.pcm_read_wait_flag = true;
+ spin_unlock_bh(&s_misc_data.pcm_read_lock);
+
+ wake_up(&s_misc_data.pcm_read_waitq);
+
+ return OK;
+}
+
+static int hifi_dsp_write_param(unsigned long arg)
+{
+ int ret = OK;
+ void *hifi_param_vir_addr = NULL;
+ void *para_addr_in = NULL;
+ void *para_addr_out = NULL;
+ struct misc_io_sync_param para;
+
+ IN_FUNCTION;
+
+ if (copy_from_user
+ (&para, (void *)arg, sizeof(struct misc_io_sync_param))) {
+ loge("copy_from_user fail.\n");
+ ret = ERROR;
+ goto error1;
+ }
+
+ para_addr_in = INT_TO_ADDR(para.para_in_l, para.para_in_h);
+ para_addr_out = INT_TO_ADDR(para.para_out_l, para.para_out_h);
+
+ hifi_param_vir_addr =
+ (unsigned char *)(s_misc_data.hifi_priv_base_virt +
+ (HIFI_AP_NV_DATA_ADDR - HIFI_UNSEC_BASE_ADDR));
+
+ logd("hifi_param_vir_addr = 0x%pK. (*hifi_param_vir_addr) = 0x%x\n",
+ hifi_param_vir_addr, (*(int *)hifi_param_vir_addr));
+
+ logd("user addr = 0x%pK, size = %d \n", para_addr_in,
+ para.para_size_in);
+
+ if (para.para_size_in != NVPARAM_TOTAL_SIZE) {
+ loge("the para_size_in(%u) is not equal to NVPARAM_TOTAL_SIZE(%u). \n", para.para_size_in, (unsigned int)(NVPARAM_TOTAL_SIZE));
+ ret = ERROR;
+ goto error1;
+ }
+
+ ret =
+ copy_from_user(hifi_param_vir_addr, (void __user *)para_addr_in,
+ para.para_size_in);
+
+ if (ret != 0) {
+ loge("copy data to hifi error! ret = %d.\n", ret);
+ ret = ERROR;
+ }
+
+ if (para.para_size_out != sizeof(ret)) {
+ loge("the para_size_out(%u) is not equal to sizeof(ret)(%zu) \n", para.para_size_out, sizeof(ret));
+ ret = ERROR;
+ goto error1;
+ }
+
+ ret = copy_to_user((void __user *)para_addr_out, &ret, sizeof(ret));
+ if (ret) {
+ loge("copy data to user fail! ret = %d.\n", ret);
+ ret = ERROR;
+ }
+
+ error1:
+ OUT_FUNCTION;
+ return ret;
+}
+
+static int hifi_misc_open(struct inode *finode, struct file *fd)
+{
+ logi("open device.\n");
+ load_hifi_img_by_misc();
+ return OK;
+}
+
+static int hifi_misc_release(struct inode *finode, struct file *fd)
+{
+ logi("close device.\n");
+ return OK;
+}
+
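+/*
+ * Illustrative user-space sketch (not authoritative ABI documentation),
+ * assuming the ioctl codes and parameter structs come from audio_hifi.h and
+ * that udev names the misc device /dev/hifi_misc:
+ *
+ *	int fd = open("/dev/hifi_misc", O_RDWR);
+ *	struct misc_io_get_phys_param phys = { .flag = 0 };
+ *
+ *	if (fd >= 0 && !ioctl(fd, HIFI_MISC_IOCTL_GET_PHYS, &phys))
+ *		printf("music buffer offset: 0x%x%08x\n",
+ *		       phys.phys_addr_h, phys.phys_addr_l);
+ */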
+static long hifi_misc_ioctl(struct file *fd, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = OK;
+ void __user *data32 = (void __user *)arg;
+
+ IN_FUNCTION;
+
+ if (!(void __user *)arg) {
+ loge("Input buff is NULL.\n");
+ OUT_FUNCTION;
+ return (long)-EINVAL;
+ }
+
+ if (!hifi_is_loaded()) {
+ loge("hifi isn't loaded.\n");
+ return (long)-EINVAL;
+ }
+
+ switch (cmd) {
+
+ case HIFI_MISC_IOCTL_PCM_GAIN:
+ {
+ struct misc_io_pcm_buf_param buf;
+
+ logd("ioctl: HIFI_MISC_IOCTL_PCM_GAIN.\n");
+ if (copy_from_user(&buf, data32, sizeof(buf))) {
+ ret = -EINVAL;
+ logd("HIFI_MISC_IOCTL_PCM_GAIN: couldn't copy misc_io_pcm_buf_param\n");
+ break;
+ }
+ send_pcm_data_to_dsp((void *)buf.buf, buf.buf_size);
+ }
+ break;
+
+ case HIFI_MISC_IOCTL_XAF_IPC_MSG_SEND:
+ {
+ struct xf_proxy_msg xaf_msg;
+
+ logi("ioctl: HIFI_MISC_IOCTL_XAF_IPC_MSG_SEND.\n");
+ if (copy_from_user(&xaf_msg, data32, sizeof(xaf_msg))) {
+ ret = -EINVAL;
+ logd("HIFI_MISC_IOCTL_XAF_IPC_MSG_SEND: couldn't copy xf_proxy_msg\n");
+ break;
+ }
+ send_xaf_ipc_msg_to_dsp(&xaf_msg);
+ }
+ break;
+
+ case HIFI_MISC_IOCTL_XAF_IPC_MSG_RECV:
+ {
+ struct xf_proxy_msg xaf_msg;
+
+ logi("ioctl: HIFI_MISC_IOCTL_XAF_IPC_MSG_RECV.\n");
+ /* fetch the request first so xaf_msg.length is initialized */
+ if (copy_from_user(&xaf_msg, data32, sizeof(xaf_msg))) {
+ ret = -EINVAL;
+ logd("HIFI_MISC_IOCTL_XAF_IPC_MSG_RECV: couldn't copy xf_proxy_msg\n");
+ break;
+ }
+
+ read_xaf_ipc_msg_from_dsp(&xaf_msg, xaf_msg.length);
+
+ if (copy_to_user(data32, &xaf_msg, sizeof(xaf_msg))) {
+ ret = -EINVAL;
+ logd("HIFI_MISC_IOCTL_XAF_IPC_MSG_RECV: couldn't copy xf_proxy_msg\n");
+ }
+ }
+ break;
+
+ case HIFI_MISC_IOCTL_ASYNCMSG:
+ logd("ioctl: HIFI_MISC_IOCTL_ASYNCMSG.\n");
+ ret = hifi_dsp_async_cmd((unsigned long)data32);
+ break;
+
+ case HIFI_MISC_IOCTL_SYNCMSG:
+ logd("ioctl: HIFI_MISC_IOCTL_SYNCMSG.\n");
+ ret = down_interruptible(&s_misc_sem);
+ if (ret != 0) {
+ loge("SYNCMSG wake up by other irq err:%d.\n", ret);
+ goto out;
+ }
+ ret = hifi_dsp_sync_cmd((unsigned long)data32);
+ up(&s_misc_sem);
+ break;
+
+ case HIFI_MISC_IOCTL_SENDDATA_SYNC:
+ logd("ioctl: HIFI_MISC_IOCTL_SENDDATA_SYNC.\n");
+ ret = down_interruptible(&s_misc_sem);
+ if (ret != 0) {
+ loge("SENDDATA_SYNC wake up by other irq err:%d.\n", ret);
+ goto out;
+ }
+ ret = hifi_dsp_senddata_sync_cmd((unsigned long)data32); /* not used for now */
+ up(&s_misc_sem);
+ break;
+
+ case HIFI_MISC_IOCTL_GET_PHYS:
+ logd("ioctl: HIFI_MISC_IOCTL_GET_PHYS.\n");
+ ret = hifi_dsp_get_phys_cmd((unsigned long)data32);
+ break;
+
+ case HIFI_MISC_IOCTL_WRITE_PARAMS: /* write algo param to hifi*/
+ ret = hifi_dsp_write_param((unsigned long)data32);
+ break;
+
+ case HIFI_MISC_IOCTL_DUMP_HIFI:
+ logi("ioctl: HIFI_MISC_IOCTL_DUMP_HIFI.\n");
+ ret = hifi_dsp_dump_hifi((void __user *)arg);
+ break;
+
+ case HIFI_MISC_IOCTL_DISPLAY_MSG:
+ logi("ioctl: HIFI_MISC_IOCTL_DISPLAY_MSG.\n");
+ ret = hifi_get_dmesg((void __user *)arg);
+ break;
+
+ case HIFI_MISC_IOCTL_GET_VOICE_BSD_PARAM:
+ logi("ioctl:HIFI_MISC_IOCTL_GET_VOICE_BSD_PARAM.\n");
+ ret = hifi_om_get_voice_bsd_param(data32);
+ break;
+ case HIFI_MISC_IOCTL_WAKEUP_THREAD:
+ logi("ioctl: HIFI_MISC_IOCTL_WAKEUP_THREAD.\n");
+ ret = hifi_dsp_wakeup_read_thread((unsigned long)data32);
+ break;
+
+ case HIFI_MISC_IOCTL_WAKEUP_PCM_READ_THREAD:
+ logi("ioctl: HIFI_MISC_IOCTL_WAKEUP_PCM_READ_THREAD.\n");
+ ret = hifi_dsp_wakeup_pcm_read_thread((unsigned long)data32);
+ break;
+ default:
+ ret = (long)ERROR;
+ loge("ioctl: Invalid CMD =0x%x.\n", (unsigned int)cmd);
+ break;
+ }
+ out:
+ OUT_FUNCTION;
+ return (long)ret;
+}
+
+static long hifi_misc_ioctl32(struct file *fd,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *user_ptr = compat_ptr(arg);
+ return hifi_misc_ioctl(fd, cmd, (unsigned long)user_ptr);
+}
+
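+/*
+ * Map the unsecure HIFI music data region (hifi_priv_base_phy, i.e.
+ * HIFI_MUSIC_DATA_LOCATION) into user space as write-combined memory. The
+ * requested length is clamped to HIFI_MUSIC_DATA_SIZE.
+ */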
+static int hifi_misc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret = OK;
+ unsigned long phys_page_addr = 0;
+ unsigned long size = 0;
+ IN_FUNCTION;
+
+ if (NULL == (void *)vma) {
+ logd("input error: vma is NULL\n");
+ return ERROR;
+ }
+
+ phys_page_addr = (u64) s_misc_data.hifi_priv_base_phy >> PAGE_SHIFT;
+ size = ((unsigned long)vma->vm_end - (unsigned long)vma->vm_start);
+ logd("vma=0x%pK.\n", vma);
+ logd("size=%ld, vma->vm_start=%ld, end=%ld.\n",
+ ((unsigned long)vma->vm_end - (unsigned long)vma->vm_start),
+ (unsigned long)vma->vm_start, (unsigned long)vma->vm_end);
+ logd("phys_page_addr=%ld.\n", (unsigned long)phys_page_addr);
+
+ vma->vm_page_prot = PAGE_SHARED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ if (size > HIFI_MUSIC_DATA_SIZE) {
+ size = HIFI_MUSIC_DATA_SIZE;
+ }
+
+ ret = remap_pfn_range(vma,
+ vma->vm_start,
+ phys_page_addr, size, vma->vm_page_prot);
+ if (ret != 0) {
+ loge("remap_pfn_range ret=%d\n", ret);
+ return ERROR;
+ }
+
+ OUT_FUNCTION;
+ return ret;
+}
+
+static unsigned int hifi_misc_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ logi("Enter hifi_misc_poll.\n");
+ /*put the queue into poll_table*/
+ poll_wait(filp, &(s_misc_data.xaf_waitq), wait);
+
+ if (hasData) {
+ mask |= POLLIN | POLLRDNORM;
+ logi("notify read process\n");
+ }
+
+ return mask;
+}
+
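+/*
+ * Read handler for /proc/hifidsp/hifi: sleep until hifi_misc_handle_mail()
+ * or hifi_dsp_wakeup_read_thread() queues a message, strip the SIZE_CMD_ID
+ * trailer and copy the payload to user space. The payload is cached in a
+ * static buffer so that a failed copy_to_user() can be retried up to
+ * RETRY_COUNT times before being dropped.
+ */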
+static ssize_t hifi_misc_proc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ static unsigned int len;
+ static int retry_cnt;
+ static char *to_user_buf;
+ int ret = -EAGAIN;
+ struct recv_request *recv = NULL;
+ struct misc_recmsg_param *recmsg = NULL;
+
+ IN_FUNCTION;
+
+ if (NULL == buf) {
+ loge("input error: buf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!hifi_is_loaded()) {
+ loge("hifi isn't loaded.\n");
+ return -ENXIO;
+ }
+
+ if (to_user_buf)
+ goto RETRY;
+
+ if (list_empty(&recv_proc_work_queue_head)) {
+ /* wake_up() must be called after the wait condition has been changed. */
+ ret =
+ wait_event_interruptible(s_misc_data.proc_waitq,
+ s_misc_data.wait_flag != 0);
+ if (ret) {
+ logi("wait interrupted by a signal, ret: %d\n", ret);
+ return ret;
+ }
+ logi("wait_event_interruptible success.\n");
+ }
+
+ /* locked recv_proc_lock */
+ spin_lock_bh(&s_misc_data.recv_proc_lock);
+
+ if (likely(s_misc_data.wait_flag > 0)) {
+ s_misc_data.wait_flag--;
+ }
+
+ if (!list_empty(&recv_proc_work_queue_head)) {
+ recv =
+ list_entry(recv_proc_work_queue_head.next,
+ struct recv_request, recv_node);
+ len = recv->rev_msg.mail_buff_len;
+ recmsg = (struct misc_recmsg_param *)recv->rev_msg.mail_buff;
+
+ if (unlikely
+ (len >= MAIL_LEN_MAX || len <= SIZE_CMD_ID || !recmsg)) {
+ loge("buff size is invalid: %u(>= 512 or <= 8) or recmsg is null\n", len);
+ ret = -EINVAL;
+ } else {
+ len -= SIZE_CMD_ID;
+ to_user_buf = kzalloc(len, GFP_ATOMIC);
+ if (!to_user_buf) {
+ loge("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto ERR;
+ }
+ memcpy(to_user_buf, recv->rev_msg.mail_buff, len);
+ }
+
+ list_del(&recv->recv_node);
+ kfree(recv->rev_msg.mail_buff);
+ kfree(recv);
+ recv = NULL;
+ } else {
+ loge("recv proc list is empty.\n");
+ }
+
+ /* release recv_proc_lock */
+ spin_unlock_bh(&s_misc_data.recv_proc_lock);
+
+ RETRY:
+ if (to_user_buf) {
+ recmsg = (struct misc_recmsg_param *)to_user_buf;
+ logi("msgid: 0x%x, len: %u, "
+ "play status : %d (0 - done normal, 1 - done complete, 2 - done abnormal, 3 - reset)\n",
+ recmsg->msgID, len, recmsg->playStatus);
+
+ if (len > count) {
+ loge("copy len[%u] bigger than count[%zu]\n", len,
+ count);
+ kzfree(to_user_buf);
+ to_user_buf = NULL;
+ return -EINVAL;
+ }
+
+ ret = (int)copy_to_user(buf, to_user_buf, len);
+ if (ret != 0) {
+ loge("copy to user fail, ret : %d len : %u, retry_cnt: %d\n", ret, len, retry_cnt);
+ retry_cnt++;
+ ret = -EFAULT;
+ }
+
+ if (0 == ret || retry_cnt >= RETRY_COUNT) {
+ kzfree(to_user_buf);
+ to_user_buf = NULL;
+ retry_cnt = 0;
+ len = 0;
+ }
+ }
+
+ OUT_FUNCTION;
+ return ret;
+
+ ERR:
+ spin_unlock_bh(&s_misc_data.recv_proc_lock);
+ return ret;
+
+}
+
+static ssize_t hifi_misc_pcm_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret = 0;
+
+ (void)file;
+ (void)ppos;
+
+ if (!hifi_is_loaded()) {
+ loge("hifi isn't loaded\n");
+ return -ENXIO;
+ }
+
+ if (!buf) {
+ loge("param err, buf is null\n");
+ return -EINVAL;
+ }
+
+ if (0 == count || count > HIFI_PCM_UPLOAD_BUFFER_SIZE) {
+ loge("param err, count = %zu\n", count);
+ return -EINVAL;
+ }
+
+ /* wait_event_interruptible(wq, condition);
+ * The function will return -ERESTARTSYS if it was interrupted by a signal,
+ * and 0 if condition evaluated to true.
+ */
+ ret = wait_event_interruptible(s_misc_data.pcm_read_waitq, s_misc_data.pcm_read_wait_flag); /*lint !e40 !e578 !e712 !e774 !e845 */
+ if (ret) {
+ loge("pcm read wait interrupted, 0x%x\n", ret);
+ return -EBUSY;
+ }
+
+ spin_lock_bh(&s_misc_data.pcm_read_lock);
+
+ s_misc_data.pcm_read_wait_flag = false;
+
+ spin_unlock_bh(&s_misc_data.pcm_read_lock);
+
+ if (copy_to_user
+ (buf,
+ s_misc_data.hifi_priv_base_virt + (HIFI_PCM_UPLOAD_BUFFER_ADDR -
+ HIFI_UNSEC_BASE_ADDR), count)) {
+ loge("pcm read copy_to_user fail\n");
+ return -EFAULT;
+ }
+
+ return (ssize_t) count;
+}
+
+static const struct file_operations hifi_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = hifi_misc_open,
+ .release = hifi_misc_release,
+ .unlocked_ioctl = hifi_misc_ioctl,
+ .compat_ioctl = hifi_misc_ioctl32,
+ .mmap = hifi_misc_mmap,
+ .poll = hifi_misc_poll,
+};
+
+static struct miscdevice hifi_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hifi_misc",
+ .fops = &hifi_misc_fops,
+};
+
+static const struct file_operations hifi_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = hifi_misc_proc_read,
+};
+
+static const struct file_operations hifi_pcm_read_fops = {
+ .owner = THIS_MODULE,
+ .read = hifi_misc_pcm_read,
+};
+
+static void hifi_misc_proc_init(void)
+{
+ struct proc_dir_entry *hifi_misc_dir;
+ struct proc_dir_entry *entry_hifi;
+ struct proc_dir_entry *entry_hifi_pcm_read;
+
+ hifi_misc_dir = proc_mkdir(FILE_PROC_DIRECTORY, NULL);
+ if (hifi_misc_dir == NULL) {
+ loge("Unable to create /proc/hifidsp directory.\n");
+ return;
+ }
+
+ /* Creating read/write "status" entry */
+ entry_hifi =
+ proc_create("hifi", S_IRUSR | S_IRGRP | S_IROTH, hifi_misc_dir,
+ &hifi_proc_fops);
+ if (!entry_hifi) {
+ loge("Unable to create /proc/hifidsp/hifi entry.\n");
+ }
+
+ entry_hifi_pcm_read =
+ proc_create("hifi_pcm_read", S_IRUSR | S_IRGRP | S_IROTH,
+ hifi_misc_dir, &hifi_pcm_read_fops);
+ if (!entry_hifi_pcm_read) {
+ loge("Unable to create /proc/hifidsp/hifi_pcm_read entry.\n");
+ }
+
+ if (!entry_hifi && !entry_hifi_pcm_read) {
+ /* void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
+ * remove a /proc entry and free it if it's not currently in use.
+ */
+ remove_proc_entry(FILE_PROC_DIRECTORY, NULL);
+ logi("remove /proc/hifidsp directory.\n");
+ }
+
+ return;
+}
+
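+/*
+ * Suspend/resume and reboot notifiers: s_hifi_in_suspend blocks new log
+ * saving (see hifi_get_log_signal()), while s_hifi_in_saving makes the
+ * notifiers wait until an in-flight dump has finished.
+ */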
+static int hifi_sr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ logi("resume +.\n");
+ atomic_set(&s_hifi_in_suspend, 0);
+ logi("resume -.\n");
+ break;
+
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ logi("suspend +.\n");
+ atomic_set(&s_hifi_in_suspend, 1);
+ while (true) {
+ if (atomic_read(&s_hifi_in_saving)) {
+ msleep(100);
+ } else {
+ break;
+ }
+ }
+ logi("suspend -.\n");
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int hifi_reboot_notifier(struct notifier_block *nb,
+ unsigned long foo, void *bar)
+{
+ logi("reboot +.\n");
+ atomic_set(&s_hifi_in_suspend, 1);
+ while (true) {
+ if (atomic_read(&s_hifi_in_saving)) {
+ msleep(100);
+ } else {
+ break;
+ }
+ }
+ logi("reboot -.\n");
+
+ return 0;
+}
+
+void hifi_get_log_signal(void)
+{
+ while (true) {
+ if (atomic_read(&s_hifi_in_suspend)) {
+ msleep(100);
+ } else {
+ atomic_set(&s_hifi_in_saving, 1);
+ break;
+ }
+ }
+ return;
+}
+
+void hifi_release_log_signal(void)
+{
+ atomic_set(&s_hifi_in_saving, 0);
+}
+
+int hifi_send_msg(unsigned int mailcode, void *data, unsigned int length)
+{
+ if (hifi_is_loaded()) {
+ return (int)mailbox_send_msg(mailcode, data, length);
+ }
+ return -1;
+}
+
+EXPORT_SYMBOL(hifi_send_msg);
+
+/*extern void hi6402_3mic_audio_clk(int mode);*/
+/*extern void hi6402_3mic_fade_out(void);*/
+/* 3mic add for reset hi6402 audio clk */
+void reset_audio_clk_work(struct work_struct *work)
+{
+ struct common_hifi_cmd cmd_cnf;
+ struct common_hifi_cmd tmp_mesg;
+ struct common_hifi_cmd *mesg = &(tmp_mesg);
+ struct dp_clk_request *dp_clk_cmd = NULL;
+ unsigned short msg_id = 0;
+
+ while (!list_empty(&s_misc_data.multi_mic_ctrl.cmd_queue)) {
+ memset(&cmd_cnf, 0, sizeof(struct common_hifi_cmd));
+ memset(mesg, 0, sizeof(struct common_hifi_cmd));
+
+ spin_lock_bh(&s_misc_data.multi_mic_ctrl.cmd_lock);
+
+ if (!list_empty(&s_misc_data.multi_mic_ctrl.cmd_queue)) {
+ dp_clk_cmd =
+ list_entry(s_misc_data.multi_mic_ctrl.cmd_queue.
+ next, struct dp_clk_request,
+ dp_clk_node);
+
+ if (NULL == dp_clk_cmd) {
+ loge("request is NULL.\n");
+ spin_unlock_bh(&s_misc_data.multi_mic_ctrl.
+ cmd_lock);
+ return;
+ } else {
+ memcpy(mesg, &(dp_clk_cmd->dp_clk_msg),
+ sizeof(struct common_hifi_cmd));
+ }
+
+ list_del(&dp_clk_cmd->dp_clk_node);
+ kfree(dp_clk_cmd);
+ dp_clk_cmd = NULL;
+ } else {
+ logw("list is empty!\n");
+ spin_unlock_bh(&s_misc_data.multi_mic_ctrl.cmd_lock);
+ return;
+ }
+ spin_unlock_bh(&s_misc_data.multi_mic_ctrl.cmd_lock);
+
+ msg_id = mesg->msg_id;
+
+ logi("%s++,mesg[0x%x],value[0x%x],reserve[0x%x]\n",
+ __FUNCTION__, mesg->msg_id, mesg->value, mesg->reserve);
+
+ switch (msg_id) {
+ case ID_AUDIO_AP_DP_CLK_EN_IND:
+ {
+ int audio_clk_state =
+ s_misc_data.multi_mic_ctrl.audio_clk_state;
+
+ /* reset hi6402 audio dp clk */
+ if ((audio_clk_state & HI6402_DP_CLK_ON) !=
+ (mesg->value & HI6402_DP_CLK_ON)) {
+ /* hi6402_3mic_audio_clk(mesg->value); */
+ s_misc_data.multi_mic_ctrl.
+ audio_clk_state = mesg->value;
+ }
+
+ /* send ack to hifi */
+ if ((mesg->value & HI6402_DP_CLK_ON) ==
+ HI6402_DP_CLK_ON) {
+ cmd_cnf.reserve = HI6402_DP_CLK_ON;
+ } else {
+ cmd_cnf.reserve = HI6402_DP_CLK_OFF;
+ }
+
+ cmd_cnf.msg_id = ID_AP_AUDIO_DP_CLK_STATE_IND;
+ cmd_cnf.value = mesg->reserve;
+ hifi_misc_send_hifi_msg_async(&cmd_cnf);
+ }
+ break;
+
+ case ID_AUDIO_AP_FADE_OUT_REQ:
+ {
+ /*hi6402_3mic_fade_out(); */
+ cmd_cnf.msg_id = ID_AP_AUDIO_FADE_OUT_IND;
+ cmd_cnf.value = mesg->value;
+ hifi_misc_send_hifi_msg_async(&cmd_cnf);
+
+ }
+ break;
+
+ default:
+ {
+ loge("error msg:0x%x\n", msg_id);
+ }
+ break;
+ }
+ }
+
+ if (!list_empty(&s_misc_data.multi_mic_ctrl.cmd_queue)) {
+ pr_info("%s have other cmd in list\n", __FUNCTION__);
+ }
+
+ logi("%s--\n", __FUNCTION__);
+}
+
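+/*
+ * Probe sequence: register the PM/reboot notifiers, map the unsecure HIFI
+ * memory region, register the misc device and /proc entries, initialize the
+ * locks, wait queues and wakeup sources, bring up IPC and the mailbox,
+ * create the 3mic reset-clock workqueue and finally register the mailbox
+ * callback and the DSP interrupt.
+ */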
+static int hifi_misc_probe(struct platform_device *pdev)
+{
+ int ret = OK;
+
+ IN_FUNCTION;
+
+ memset(&s_misc_data, 0, sizeof(struct hifi_misc_priv));
+
+ s_misc_data.dev = &pdev->dev;
+
+ /* Register to get PM events */
+ s_hifi_sr_nb.notifier_call = hifi_sr_event;
+ s_hifi_sr_nb.priority = -1;
+ (void)register_pm_notifier(&s_hifi_sr_nb);
+
+ s_hifi_reboot_nb.notifier_call = hifi_reboot_notifier;
+ s_hifi_reboot_nb.priority = -1;
+ (void)register_reboot_notifier(&s_hifi_reboot_nb);
+
+ s_misc_data.hifi_priv_base_virt =
+ (unsigned char *)ioremap_wc(HIFI_UNSEC_BASE_ADDR,
+ HIFI_UNSEC_REGION_SIZE);
+
+ if (NULL == s_misc_data.hifi_priv_base_virt) {
+ pr_err("hifi ioremap_wc error.\n");
+ goto ERR;
+ }
+ s_misc_data.hifi_priv_base_phy =
+ (unsigned char *)HIFI_MUSIC_DATA_LOCATION;
+
+ hifi_om_init(pdev, s_misc_data.hifi_priv_base_virt,
+ s_misc_data.hifi_priv_base_phy);
+
+ pr_err("hifi pdev name[%s].\n", pdev->name);
+
+ ret = misc_register(&hifi_misc_device);
+ if (OK != ret) {
+ loge("hifi misc device register fail,ERROR is %d.\n", ret);
+ goto ERR;
+ }
+
+ hifi_misc_proc_init();
+
+ spin_lock_init(&s_misc_data.recv_sync_lock);
+ spin_lock_init(&s_misc_data.recv_proc_lock);
+ spin_lock_init(&s_misc_data.pcm_read_lock);
+
+ init_completion(&s_misc_data.completion);
+
+ init_waitqueue_head(&s_misc_data.proc_waitq);
+ s_misc_data.wait_flag = 0;
+
+ /*xaf wait*/
+ init_waitqueue_head(&s_misc_data.xaf_waitq);
+
+ init_waitqueue_head(&s_misc_data.pcm_read_waitq);
+ s_misc_data.pcm_read_wait_flag = false;
+
+ s_misc_data.sn = 0;
+
+ wakeup_source_init(&s_misc_data.hifi_misc_wakesrc, "hifi_wakelock");
+ wakeup_source_init(&s_misc_data.update_buff_wakesrc, "update_buff_wakelock");
+
+ ret = DRV_IPCIntInit();
+ if (OK != ret) {
+ loge("hifi ipc init fail.\n");
+ goto ERR;
+ }
+ ret = (int)mailbox_init();
+ if (OK != ret) {
+ loge("hifi mailbox init fail.\n");
+ goto ERR;
+ }
+
+ /* init 3mic reset clk workqueue */
+ /* 3mic add for reset hi6402 audio dp clk
+ * init 3mic reset clk workqueue
+ */
+ s_misc_data.multi_mic_ctrl.reset_audio_dp_clk_wq =
+ create_singlethread_workqueue("multi_mic_reset_clk_wq");
+ if (!(s_misc_data.multi_mic_ctrl.reset_audio_dp_clk_wq)) {
+ pr_err("%s(%u) : workqueue create failed", __FUNCTION__,
+ __LINE__);
+ goto ERR;
+ }
+ INIT_WORK(&s_misc_data.multi_mic_ctrl.reset_audio_dp_clk_work,
+ reset_audio_clk_work);
+ s_misc_data.multi_mic_ctrl.audio_clk_state = HI6402_DP_CLK_ON;
+ INIT_LIST_HEAD(&s_misc_data.multi_mic_ctrl.cmd_queue);
+ spin_lock_init(&(s_misc_data.multi_mic_ctrl.cmd_lock));
+ ret =
+ mailbox_reg_msg_cb(MAILBOX_MAILCODE_HIFI_TO_ACPU_MISC,
+ (mb_msg_cb) hifi_misc_handle_mail, NULL);
+
+ if (OK != ret) {
+ loge("hifi mailbox handle func register fail.\n");
+ goto ERR;
+ }
+
+ /*Register for interrupt from DSP*/
+ ap_ipc_int_init();
+
+ OUT_FUNCTION;
+ return OK;
+
+ ERR:
+ hifi_om_deinit(pdev);
+
+ if (NULL != s_misc_data.hifi_priv_base_virt) {
+ iounmap(s_misc_data.hifi_priv_base_virt);
+ s_misc_data.hifi_priv_base_virt = NULL;
+ }
+
+ if (s_misc_data.multi_mic_ctrl.reset_audio_dp_clk_wq) {
+ flush_workqueue(s_misc_data.multi_mic_ctrl.
+ reset_audio_dp_clk_wq);
+ destroy_workqueue(s_misc_data.multi_mic_ctrl.
+ reset_audio_dp_clk_wq);
+ }
+
+ unregister_pm_notifier(&s_hifi_sr_nb);
+ unregister_reboot_notifier(&s_hifi_reboot_nb);
+
+ (void)misc_deregister(&hifi_misc_device);
+
+ OUT_FUNCTION;
+ return ERROR;
+}
+
+static int hifi_misc_remove(struct platform_device *pdev)
+{
+ IN_FUNCTION;
+
+ hifi_om_deinit(pdev);
+
+ if (NULL != s_misc_data.hifi_priv_base_virt) {
+ iounmap(s_misc_data.hifi_priv_base_virt);
+ s_misc_data.hifi_priv_base_virt = NULL;
+ }
+
+ /* Unregister for PM events */
+ unregister_pm_notifier(&s_hifi_sr_nb);
+ unregister_reboot_notifier(&s_hifi_reboot_nb);
+
+ /* misc deregister */
+ (void)misc_deregister(&hifi_misc_device);
+
+ /* wake lock destroy */
+ wakeup_source_trash(&s_misc_data.hifi_misc_wakesrc);
+ wakeup_source_trash(&s_misc_data.update_buff_wakesrc);
+
+ OUT_FUNCTION;
+ return OK;
+}
+
+static const struct of_device_id hifi_match_table[] = {
+ {
+ .compatible = DTS_COMP_HIFIDSP_NAME,
+ .data = NULL,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, hifi_match_table);
+
+static struct platform_driver hifi_misc_driver = {
+ .driver = {
+ .name = "hifi_dsp_misc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(hifi_match_table),
+ },
+ .probe = hifi_misc_probe,
+ .remove = hifi_misc_remove,
+};
+
+module_platform_driver(hifi_misc_driver);
+
+MODULE_DESCRIPTION("hifi driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hisi/hifi_dsp/hifi_lpp.h b/drivers/hisi/hifi_dsp/hifi_lpp.h
new file mode 100644
index 000000000000..caea69d4c25e
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/hifi_lpp.h
@@ -0,0 +1,351 @@
+/*
+ * hifi misc driver.
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc.All rights reserved worldwide.
+ *
+ */
+
+#ifndef __HIFI_LPP_H__
+#define __HIFI_LPP_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#include <linux/list.h>
+#include "../ap/platform/hi3660/global_ddr_map.h"
+
+/* mailbox mail_len max */
+#define MAIL_LEN_MAX (512)
+
+#ifndef OK
+#define OK 0
+#endif
+
+#define ERROR (-1)
+#define BUSY (-2)
+#define NOMEM (-3)
+#define INVAILD (-4)
+#define COPYFAIL (-0xFFF)
+
+#ifndef CLT_VOICE
+#define SIZE_LIMIT_PARAM (256)
+#else
+#define SIZE_LIMIT_PARAM (512)
+#endif
+
+#define NVPARAM_COUNT 400 /*HIFI NV size is 400 */
+#define NVPARAM_NUMBER 258 /*256+2, nv_data(256) + nv_id(2) */
+#define NVPARAM_START 2 /*head protect_number 0x5a5a5a5a */
+#define NVPARAM_TAIL 2 /*tail protect_number 0x5a5a5a5a */
+#define NVPARAM_TOTAL_SIZE ((NVPARAM_NUMBER * NVPARAM_COUNT + NVPARAM_START + NVPARAM_TAIL) * sizeof(unsigned short))
+
+#ifdef CONFIG_HISI_FAMA
+#define HIFI_UNSEC_BASE_ADDR (HISI_RESERVED_HIFI_DATA_PHYMEM_BASE_FAMA)
+#else
+#define HIFI_UNSEC_BASE_ADDR (HISI_RESERVED_HIFI_DATA_PHYMEM_BASE)
+#endif
+
+#ifdef CONFIG_HISI_FAMA
+#define HIFI_SEC_BASE_ADDR (HISI_RESERVED_HIFI_PHYMEM_BASE_FAMA)
+#else
+#define HIFI_SEC_BASE_ADDR (HISI_RESERVED_HIFI_PHYMEM_BASE)
+#endif
+
+/** for chicago only **/
+/**Non Secure 3.5M **/
+/* |0x8B300000|0x8B432000|0x8B532000|0x8B5B1000|0x8B5B2000|0x8B5C5000|0x8B5C6000|0x8B5C7000|0x8B5F9800|~~0x8B609800~~|~~0x8B618800~~|~0x8B618880~|0x8B627880|~0x8B629880~|0x8B62C880~~~| */
+/* |Music data|~~PCM data|~hifi uart|panicstack|icc debug~|flag data~|DDRsechead|~~AP NV ~~|AP&HIFIMB~|codec dma buff|codecdmaconfig|soundtrigger|pcm upload|~hikey share|unsec reserve| */
+/* |~~~~1.2M~~|~~~~1M~~~~|~~508k~~~~|~~~~~4k~~~|~~76k~~~~~|~~~4k~~~~~|~~~4k~~~~~|~~202k~~~~|~~~64k~~~~|~~~~60k~~~~~~~|~~~~128b~~~~~~|~~~~60k~~~~~|~~~8k~~~~~|~~~~~12k~~~~|~~334k-128b~~| */
+/* |0x8B431fff|0x8B531fff|0x8B5B0fff|0x8B5B1fff|0x8B5C4fff|0x8B5C5fff|0x8B5C6fff|0x8B5F97ff|0x8B6097ff|~~0x8B6187FF~~|~~0x8B61887F~~|~0x8B62787F~|0x8B62987F|0x8B62C87F~~|~~0x8B67FFFF~| */
+
+/** Secure9.5M **/
+/* |~~~0x89200000~~~|~~~0x89800000~~~|~~~0x89830000~~|~~~0x89864000~~~| */
+/* |~~HIFI RUNNING~~|~OCRAM img bak~~|~~TCM img bak~~|~~~~IMG bak~~~~~| */
+/* |~~~~~~~6M~~~~~~~|~~~~~~192K~~~~~~|~~~~~208k~~~~~~|~~~~~~3.1M ~~~~~| */
+/* |~~~0x897fffff~~~|~~~0x8982ffff~~~|~~~0x89863fff~~|~~~0x89B80000~~~| */
+
+#define HIFI_UNSEC_REGION_SIZE (0x380000)
+#define HIFI_MUSIC_DATA_SIZE (0x132000)
+#define PCM_PLAY_BUFF_SIZE (0x100000)
+#define DRV_DSP_UART_TO_MEM_SIZE (0x7f000)
+#define DRV_DSP_UART_TO_MEM_RESERVE_SIZE (0x100)
+#define DRV_DSP_STACK_TO_MEM_SIZE (0x1000)
+#define HIFI_ICC_DEBUG_SIZE (0x13000)
+#define HIFI_FLAG_DATA_SIZE (0x1000)
+#define HIFI_SEC_HEAD_SIZE (0x1000)
+#define HIFI_AP_NV_DATA_SIZE (0x32800)
+#define HIFI_AP_MAILBOX_TOTAL_SIZE (0x10000)
+#define CODEC_DSP_OM_DMA_BUFFER_SIZE (0xF000)
+#define CODEC_DSP_OM_DMA_CONFIG_SIZE (0x80)
+#define CODEC_DSP_SOUNDTRIGGER_TOTAL_SIZE (0xF000)
+#define HIFI_PCM_UPLOAD_BUFFER_SIZE (0x2000)
+#define HIFI_HIKEY_SHARE_SIZE (0x1800 * 2)
+#define HIFI_UNSEC_RESERVE_SIZE (0x53780)
+
+#define HIFI_MUSIC_DATA_LOCATION (HIFI_UNSEC_BASE_ADDR)
+#define PCM_PLAY_BUFF_LOCATION (HIFI_MUSIC_DATA_LOCATION + HIFI_MUSIC_DATA_SIZE)
+#define DRV_DSP_UART_TO_MEM (PCM_PLAY_BUFF_LOCATION + PCM_PLAY_BUFF_SIZE)
+#define DRV_DSP_STACK_TO_MEM (DRV_DSP_UART_TO_MEM + DRV_DSP_UART_TO_MEM_SIZE)
+#define HIFI_ICC_DEBUG_LOCATION (DRV_DSP_STACK_TO_MEM + DRV_DSP_STACK_TO_MEM_SIZE)
+#define HIFI_FLAG_DATA_ADDR (HIFI_ICC_DEBUG_LOCATION + HIFI_ICC_DEBUG_SIZE)
+#define HIFI_SEC_HEAD_BACKUP (HIFI_FLAG_DATA_ADDR + HIFI_FLAG_DATA_SIZE)
+#define HIFI_AP_NV_DATA_ADDR (HIFI_SEC_HEAD_BACKUP + HIFI_SEC_HEAD_SIZE)
+#define HIFI_AP_MAILBOX_BASE_ADDR (HIFI_AP_NV_DATA_ADDR + HIFI_AP_NV_DATA_SIZE)
+#define CODEC_DSP_OM_DMA_BUFFER_ADDR (HIFI_AP_MAILBOX_BASE_ADDR + HIFI_AP_MAILBOX_TOTAL_SIZE)
+#define CODEC_DSP_OM_DMA_CONFIG_ADDR (CODEC_DSP_OM_DMA_BUFFER_ADDR + CODEC_DSP_OM_DMA_BUFFER_SIZE)
+#define CODEC_DSP_SOUNDTRIGGER_BASE_ADDR (CODEC_DSP_OM_DMA_CONFIG_ADDR + CODEC_DSP_OM_DMA_CONFIG_SIZE)
+#define HIFI_PCM_UPLOAD_BUFFER_ADDR (CODEC_DSP_SOUNDTRIGGER_BASE_ADDR + CODEC_DSP_SOUNDTRIGGER_TOTAL_SIZE)
+#define HIFI_HIKEY_SHARE_MEM_ADDR (HIFI_PCM_UPLOAD_BUFFER_ADDR + HIFI_AP_MAILBOX_TOTAL_SIZE)
+#define HIFI_UNSEC_RESERVE_ADDR (HIFI_HIKEY_SHARE_MEM_ADDR + HIFI_HIKEY_SHARE_SIZE)
+#if 0
+#define HIFI_OM_LOG_SIZE (0xA000)
+#define HIFI_OM_LOG_ADDR (DRV_DSP_UART_TO_MEM - HIFI_OM_LOG_SIZE)
+#define HIFI_DUMP_BIN_SIZE (HIFI_UNSEC_RESERVE_ADDR - HIFI_OM_LOG_ADDR)
+#define HIFI_DUMP_BIN_ADDR (HIFI_OM_LOG_ADDR)
+#endif
+#define HIFI_DUMP_TCM_ADDR (DRV_DSP_UART_TO_MEM - HIFI_IMAGE_TCMBAK_SIZE)
+#define HIFI_DUMP_BIN_SIZE (HIFI_UNSEC_RESERVE_ADDR - DRV_DSP_UART_TO_MEM + HIFI_IMAGE_TCMBAK_SIZE)
+#define HIFI_DUMP_BIN_ADDR (HIFI_DUMP_TCM_ADDR)
+
+#define DRV_DSP_PANIC_MARK (HIFI_FLAG_DATA_ADDR)
+#define DRV_DSP_UART_LOG_LEVEL (DRV_DSP_PANIC_MARK + 4)
+#define DRV_DSP_UART_TO_MEM_CUR_ADDR (DRV_DSP_UART_LOG_LEVEL + 4)
+#define DRV_DSP_EXCEPTION_NO (DRV_DSP_UART_TO_MEM_CUR_ADDR + 4)
+#define DRV_DSP_IDLE_COUNT_ADDR (DRV_DSP_EXCEPTION_NO + 4)
+#define DRV_DSP_LOADED_INDICATE (DRV_DSP_IDLE_COUNT_ADDR + 4)
+#define DRV_DSP_KILLME_ADDR (DRV_DSP_LOADED_INDICATE + 4)
+#define DRV_DSP_WRITE_MEM_PRINT_ADDR (DRV_DSP_KILLME_ADDR + 4)
+#define DRV_DSP_POWER_STATUS_ADDR (DRV_DSP_WRITE_MEM_PRINT_ADDR + 4)
+#define DRV_DSP_NMI_FLAG_ADDR (DRV_DSP_POWER_STATUS_ADDR + 4)
+#define DRV_DSP_SOCP_FAMA_CONFIG_ADDR (DRV_DSP_NMI_FLAG_ADDR + 4)
+#define DRV_DSP_FLAG_DATA_RESERVED (DRV_DSP_SOCP_FAMA_CONFIG_ADDR + sizeof(struct drv_fama_config))
+
+#define DRV_DSP_POWER_ON (0x55AA55AA)
+#define DRV_DSP_POWER_OFF (0x55FF55FF)
+#define DRV_DSP_KILLME_VALUE (0xA5A55A5A)
+#define DRV_DSP_NMI_COMPLETE (0xB5B5B5B5)
+#define DRV_DSP_NMI_INIT (0xA5A5A5A5)
+#define DRV_DSP_SOCP_FAMA_HEAD_MAGIC (0x5A5A5A5A)
+#define DRV_DSP_SOCP_FAMA_REAR_MAGIC (0xA5A55A5A)
+#define DRV_DSP_FAMA_ON (0x1)
+#define DRV_DSP_FAMA_OFF (0x0)
+
+#define HIFI_SEC_REGION_SIZE (0x980000)
+#define HIFI_IMAGE_OCRAMBAK_SIZE (0x30000)
+#ifdef HIFI_TCM_208K
+#define HIFI_RUN_SIZE (0x600000)
+#define HIFI_IMAGE_TCMBAK_SIZE (0x34000)
+#define HIFI_IMAGE_SIZE (0x31C000)
+#define HIFI_RUN_ITCM_BASE (0xe8080000)
+#define HIFI_RUN_ITCM_SIZE (0x9000)
+#define HIFI_RUN_DTCM_BASE (0xe8058000)
+#define HIFI_RUN_DTCM_SIZE (0x28000)
+#else
+#define HIFI_RUN_SIZE (0x500000)
+#define HIFI_IMAGE_TCMBAK_SIZE (0x1E000)
+#define HIFI_IMAGE_SIZE (0x400000)
+#define HIFI_SEC_RESERVE_SIZE (0x32000)
+#define HIFI_RUN_ITCM_BASE (0xe8070000)
+#define HIFI_RUN_ITCM_SIZE (0x6000)
+#define HIFI_RUN_DTCM_BASE (0xe8058000)
+#define HIFI_RUN_DTCM_SIZE (0x18000)
+#endif
+
+#ifdef HIFI_TCM_208K
+#define HIFI_SEC_REGION_ADDR (HIFI_SEC_BASE_ADDR) /* chicago */
+#define HIFI_RUN_LOCATION (HIFI_SEC_REGION_ADDR)
+#define HIFI_IMAGE_OCRAMBAK_LOCATION (HIFI_RUN_LOCATION + HIFI_RUN_SIZE)
+#define HIFI_IMAGE_TCMBAK_LOCATION (HIFI_IMAGE_OCRAMBAK_LOCATION + HIFI_IMAGE_OCRAMBAK_SIZE)
+#define HIFI_IMAGE_LOCATION (HIFI_IMAGE_TCMBAK_LOCATION + HIFI_IMAGE_TCMBAK_SIZE)
+#else
+#define HIFI_SEC_REGION_ADDR (HIFI_SEC_BASE_ADDR) /* chicago */
+#define HIFI_IMAGE_OCRAMBAK_LOCATION (HIFI_SEC_REGION_ADDR)
+#define HIFI_IMAGE_TCMBAK_LOCATION (HIFI_IMAGE_OCRAMBAK_LOCATION + HIFI_IMAGE_OCRAMBAK_SIZE)
+#define HIFI_IMAGE_LOCATION (HIFI_IMAGE_TCMBAK_LOCATION + HIFI_IMAGE_TCMBAK_SIZE)
+#define HIFI_SEC_RESERVE_ADDR (HIFI_IMAGE_LOCATION + HIFI_IMAGE_SIZE)
+#define HIFI_RUN_LOCATION (HIFI_SEC_RESERVE_ADDR + HIFI_SEC_RESERVE_SIZE)
+#endif
+
+#define HIFI_OCRAM_BASE_ADDR (0xE8000000)
+#define HIFI_TCM_BASE_ADDR (0xE8058000)
+#define HIFI_RUN_DDR_REMAP_BASE (0xC0000000)
+
+#define HIFI_TCM_PHY_BEGIN_ADDR (HIFI_TCM_BASE_ADDR)
+#define HIFI_TCM_PHY_END_ADDR (HIFI_TCM_PHY_BEGIN_ADDR + HIFI_TCM_SIZE - 1)
+#define HIFI_TCM_SIZE (HIFI_RUN_ITCM_SIZE + HIFI_RUN_DTCM_SIZE)
+
+#define HIFI_OCRAM_PHY_BEGIN_ADDR (HIFI_OCRAM_BASE_ADDR)
+#define HIFI_OCRAM_PHY_END_ADDR (HIFI_OCRAM_PHY_BEGIN_ADDR + HIFI_OCRAM_SIZE - 1)
+#define HIFI_OCRAM_SIZE (HIFI_IMAGE_OCRAMBAK_SIZE)
+
+#define SIZE_PARAM_PRIV (206408) /*refer from function dsp_nv_init in dsp_soc_para_ctl.c */
+#define HIFI_SYS_MEM_ADDR (HIFI_RUN_LOCATION)
+#define SYS_TIME_STAMP_REG (0xFFF0A534)
+
+#define SIZE_CMD_ID (8)
+
+#define REV_MSG_NOTICE_ID_MAX 2
+
+#define ACPU_TO_HIFI_ASYNC_CMD 0xFFFFFFFF
+
+#define BUFFER_NUM 8
+#define MAX_NODE_COUNT 10
+
+ typedef enum HIFI_MSG_ID_ {
+
+ /*DTS command id from ap */
+ ID_AP_AUDIO_SET_DTS_ENABLE_CMD = 0xDD36,
+ ID_AP_AUDIO_SET_DTS_DEV_CMD = 0xDD38,
+ ID_AP_AUDIO_SET_DTS_GEQ_CMD = 0xDD39,
+ ID_AP_AUDIO_SET_DTS_GEQ_ENABLE_CMD = 0xDD3B,
+
+ ID_AP_AUDIO_SET_EXCODEC_ENABLE_CMD = 0xDD3D,
+
+ /* Voice Record */
+ ID_AP_HIFI_VOICE_RECORD_START_CMD = 0xDD40,
+ ID_AP_HIFI_VOICE_RECORD_STOP_CMD = 0xDD41,
+
+ /* voicePP MSG_ID */
+ ID_AP_VOICEPP_START_REQ = 0xDD42,
+ ID_VOICEPP_MSG_START = ID_AP_VOICEPP_START_REQ,
+ ID_VOICEPP_AP_START_CNF = 0xDD43,
+ ID_AP_VOICEPP_STOP_REQ = 0xDD44,
+ ID_VOICEPP_AP_STOP_CNF = 0xDD45,
+ ID_VOICEPP_MSG_END = 0xDD4A,
+
+ ID_AP_AUDIO_PLAY_START_REQ = 0xDD51,
+ ID_AUDIO_AP_PLAY_START_CNF = 0xDD52,
+ ID_AP_AUDIO_PLAY_PAUSE_REQ = 0xDD53,
+ ID_AUDIO_AP_PLAY_PAUSE_CNF = 0xDD54,
+ ID_AUDIO_AP_PLAY_DONE_IND = 0xDD56,
+ ID_AP_AUDIO_PLAY_UPDATE_BUF_CMD = 0xDD57,
+ ID_AP_AUDIO_PLAY_QUERY_TIME_REQ = 0xDD59,
+ ID_AP_AUDIO_PLAY_WAKEUPTHREAD_REQ = 0xDD5A,
+ ID_AUDIO_AP_PLAY_QUERY_TIME_CNF = 0xDD60,
+ ID_AP_AUDIO_PLAY_QUERY_STATUS_REQ = 0xDD61,
+ ID_AUDIO_AP_PLAY_QUERY_STATUS_CNF = 0xDD62,
+ ID_AP_AUDIO_PLAY_SEEK_REQ = 0xDD63,
+ ID_AUDIO_AP_PLAY_SEEK_CNF = 0xDD64,
+ ID_AP_AUDIO_PLAY_SET_VOL_CMD = 0xDD70,
+ ID_AP_AUDIO_RECORD_PCM_HOOK_CMD = 0xDD7A,
+ ID_AUDIO_AP_UPDATE_PCM_BUFF_CMD = 0xDD7C,
+ ID_AP_AUDIO_DYN_EFFECT_GET_PARAM = 0xDD7D,
+ ID_AP_AUDIO_DYN_EFFECT_GET_PARAM_CNF = 0xDD7E,
+ ID_AP_AUDIO_DYN_EFFECT_TRIGGER = 0xDD7F,
+ /* enhance msgid between ap and hifi */
+ ID_AP_HIFI_ENHANCE_START_REQ = 0xDD81,
+ ID_HIFI_AP_ENHANCE_START_CNF = 0xDD82,
+ ID_AP_HIFI_ENHANCE_STOP_REQ = 0xDD83,
+ ID_HIFI_AP_ENHANCE_STOP_CNF = 0xDD84,
+ ID_AP_HIFI_ENHANCE_SET_DEVICE_REQ = 0xDD85,
+ ID_HIFI_AP_ENHANCE_SET_DEVICE_CNF = 0xDD86,
+
+ /* audio enhance msgid between ap and hifi */
+ ID_AP_AUDIO_ENHANCE_SET_DEVICE_IND = 0xDD91,
+ ID_AP_AUDIO_MLIB_SET_PARA_IND = 0xDD92,
+ ID_AP_AUDIO_CMD_SET_SOURCE_CMD = 0xDD95,
+ ID_AP_AUDIO_CMD_SET_DEVICE_CMD = 0xDD96,
+ ID_AP_AUDIO_CMD_SET_MODE_CMD = 0xDD97,
+ ID_AP_AUDIO_CMD_SET_ANGLE_CMD = 0xDD99,
+
+ /* for 3mic */
+ ID_AP_AUDIO_ROUTING_COMPLETE_REQ = 0xDDC0,
+ ID_AUDIO_AP_DP_CLK_EN_IND = 0xDDC1,
+ ID_AP_AUDIO_DP_CLK_STATE_IND = 0xDDC2,
+ ID_AUDIO_AP_OM_DUMP_CMD = 0xDDC3,
+ ID_AUDIO_AP_FADE_OUT_REQ = 0xDDC4,
+ ID_AP_AUDIO_FADE_OUT_IND = 0xDDC5,
+
+ ID_AUDIO_AP_OM_CMD = 0xDDC9,
+ ID_AP_AUDIO_STR_CMD = 0xDDCB,
+ ID_AUDIO_AP_VOICE_BSD_PARAM_CMD = 0xDDCC,
+
+ ID_AP_ENABLE_MODEM_LOOP_REQ = 0xDDCD, /* the audio HAL notifies HIFI to start/stop MODEM LOOP */
+ ID_AP_HIFI_REQUEST_VOICE_PARA_REQ = 0xDF00, /* AP requests voice parameters */
+ ID_HIFI_AP_REQUEST_VOICE_PARA_CNF = 0xDF01, /* HIFI reply with voice parameters */
+
+ /* XAF message IDs */
+ ID_XAF_AP_TO_DSP = 0xDF10,
+ ID_XAF_DSP_TO_AP = 0xDF11,
+ } HIFI_MSG_ID;
+
+ typedef enum HI6402_DP_CLK_STATE_ {
+ HI6402_DP_CLK_OFF = 0x0,
+ HI6402_DP_CLK_ON = 0x1,
+ } HI6402_DP_CLK_STATE;
+
+ typedef struct {
+ unsigned char *mail_buff;
+ unsigned int mail_buff_len;
+ unsigned int cmd_id;
+ unsigned char *out_buff_ptr;
+ unsigned int out_buff_len;
+ } rev_msg_buff;
+
+ struct recv_request {
+ struct list_head recv_node;
+ rev_msg_buff rev_msg;
+ };
+
+ struct misc_recmsg_param {
+ unsigned short msgID;
+ unsigned short playStatus;
+ };
+
+ struct common_hifi_cmd {
+ unsigned short msg_id;
+ unsigned short reserve;
+ unsigned int value;
+ };
+
+ struct dp_clk_request {
+ struct list_head dp_clk_node;
+ struct common_hifi_cmd dp_clk_msg;
+ };
+
+ typedef struct {
+ unsigned short down_cpu_utilization;
+ unsigned short up_cpu_utilization;
+ unsigned short ddr_freq;
+ unsigned short is_vote_ddr;
+ } audio_vote_ddr_freq_stru;
+
+ typedef struct {
+ unsigned short enable_vote_ddr;
+ unsigned short ddr_freq_count;
+ unsigned short check_interval;
+ unsigned short report_interval;
+ audio_vote_ddr_freq_stru *pst_vote_ddr_freq;
+ } audio_cpu_load_cfg_stru;
+
+ struct drv_fama_config {
+ unsigned int head_magic;
+ unsigned int flag;
+ unsigned int rear_magic;
+ };
+
+ int hifi_send_msg(unsigned int mailcode, void *data,
+ unsigned int length);
+ void hifi_get_log_signal(void);
+ void hifi_release_log_signal(void);
+ void sochifi_watchdog_send_event(void);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* end of hifi_lpp.h */
diff --git a/drivers/hisi/hifi_dsp/hifi_om.c b/drivers/hisi/hifi_dsp/hifi_om.c
new file mode 100644
index 000000000000..22263a5822ec
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/hifi_om.c
@@ -0,0 +1,1741 @@
+/*
+ * hifi om.
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc.All rights reserved worldwide.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/memory.h>
+#include <asm/types.h>
+#include <asm/io.h>
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+
+#include "audio_hifi.h"
+#include "hifi_lpp.h"
+#include "hifi_om.h"
+#include "drv_mailbox_msg.h"
+#include <linux/hisi/hisi_rproc.h>
+#include <dsm/dsm_pub.h>
+#include <linux/hisi/rdr_pub.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include "bsp_drv_ipc.h"
+
+#define HI_DECLARE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 0)
+HI_DECLARE_SEMAPHORE(hifi_log_sema);
+struct hifi_om_s g_om_data;
+static struct proc_dir_entry *hifi_debug_dir;
+bool hasData;
+
+#define MAX_LEVEL_STR_LEN 32
+#define UNCONFIRM_ADDR (0)
+static struct hifi_dsp_dump_info s_dsp_dump_info[] = {
+ {DSP_NORMAL, DUMP_DSP_LOG, FILE_NAME_DUMP_DSP_LOG, UNCONFIRM_ADDR,
+ (DRV_DSP_UART_TO_MEM_SIZE - DRV_DSP_UART_TO_MEM_RESERVE_SIZE)},
+ {DSP_NORMAL, DUMP_DSP_BIN, FILE_NAME_DUMP_DSP_BIN, UNCONFIRM_ADDR,
+ HIFI_DUMP_BIN_SIZE},
+ {DSP_PANIC, DUMP_DSP_LOG, FILE_NAME_DUMP_DSP_PANIC_LOG, UNCONFIRM_ADDR,
+ (DRV_DSP_UART_TO_MEM_SIZE - DRV_DSP_UART_TO_MEM_RESERVE_SIZE)},
+ {DSP_PANIC, DUMP_DSP_BIN, FILE_NAME_DUMP_DSP_PANIC_BIN, UNCONFIRM_ADDR,
+ HIFI_DUMP_BIN_SIZE},
+ {DSP_PANIC, DUMP_DSP_BIN, FILE_NAME_DUMP_DSP_OCRAM_BIN, UNCONFIRM_ADDR,
+ HIFI_IMAGE_OCRAMBAK_SIZE},
+ {DSP_PANIC, DUMP_DSP_BIN, FILE_NAME_DUMP_DSP_TCM_BIN, UNCONFIRM_ADDR,
+ HIFI_IMAGE_TCMBAK_SIZE},
+};
+
+static struct hifi_effect_info_stru effect_algo[] = {
+ {ID_EFFECT_ALGO_FORMATER, "FORMATER"},
+
+ {ID_EFFECT_ALGO_FORTE_VOICE_SPKOUT, "FORTE_VOICE_SPKOUT"},
+ {ID_EFFECT_ALGO_FORTE_VOICE_MICIN, "FORTE_VOICE_MICIN"},
+ {ID_EFFECT_ALGO_FORTE_VOICE_SPKOUT_BWE, "FORTE_VOICE_SPKOUT_BWE"},
+
+ {ID_EFFECT_ALGO_FORTE_VOIP_MICIN, "FORTE_VOIP_MICIN"},
+ {ID_EFFECT_ALGO_FORTE_VOIP_SPKOUT, "FORTE_VOIP_SPKOUT"},
+
+ {ID_EFFECT_ALGO_IN_CONVERT_I2S_GENERAL, "IN_CONVERT_I2S_GENERAL"},
+ {ID_EFFECT_ALGO_IN_CONVERT_I2S_HI363X, "IN_CONVERT_I2S_HI363X"},
+
+ {ID_EFFECT_ALGO_INTERLACE, "INTERLACE"},
+
+ {ID_EFFECT_ALGO_OUT_CONVERT_I2S_GENERAL, "OUT_CONVERT_I2S_GENERAL"},
+ {ID_EFFECT_ALGO_OUT_CONVERT_I2S_HI363X, "OUT_CONVERT_I2S_HI363X"},
+
+ {ID_EFFECT_ALGO_SWAP, "SWAP"},
+
+ {ID_EFFECT_ALGO_IMEDIA_WNR_MICIN, "IMEDIA_WNR_MICIN"},
+ {ID_EFFECT_ALGO_IMEDIA_WNR_SPKOUT, "IMEDIA_WNR_SPKOUT"},
+
+ {ID_EFFECT_ALGO_SWS_INTERFACE, "SWS_INTERFACE"},
+ {ID_EFFECT_ALGO_DTS, "DTS"},
+ {ID_EFFECT_ALGO_DRE, "DRE"},
+ {ID_EFFECT_ALGO_CHC, "CHC"},
+ {ID_EFFECT_ALGO_SRC, "SRC"},
+ {ID_EFFECT_ALGO_TTY, "TTY"},
+
+ {ID_EFFECT_ALGO_KARAOKE_RECORD, "KARAOKE_RECORD"},
+ {ID_EFFECT_ALGO_KARAOKE_PLAY, "KARAOKE_PLAY"},
+
+ {ID_EFFECT_ALGO_MLIB_CS_VOICE_CALL_MICIN, "MLIB_CS_VOICE_CALL_MICIN"},
+ {ID_EFFECT_ALGO_MLIB_CS_VOICE_CALL_SPKOUT, "MLIB_CS_VOICE_CALL_SPKOUT"},
+ {ID_EFFECT_ALGO_MLIB_VOIP_CALL_MICIN, "MLIB_VOIP_CALL_MICIN"},
+ {ID_EFFECT_ALGO_MLIB_VOIP_CALL_SPKOUT, "MLIB_VOIP_CALL_SPKOUT"},
+ {ID_EFFECT_ALGO_MLIB_AUDIO_PLAY, "MLIB_AUDIO_PLAY"},
+ {ID_EFFECT_ALGO_MLIB_AUDIO_RECORD, "MLIB_AUDIO_RECORD"},
+ {ID_EFFECT_ALGO_MLIB_SIRI_MICIN, "MLIB_SIRI_MICIN"},
+ {ID_EFFECT_ALGO_MLIB_SIRI_SPKOUT, "MLIB_SIRI_SPKOUT"},
+
+ {ID_EFFECT_ALGO_EQ, "EQ"},
+ {ID_EFFECT_ALGO_MBDRC6402, "MBDRC6402"},
+
+ {ID_EFFECT_ALGO_IMEDIA_VOIP_MICIN, "IMEDIA_VOIP_MICIN"},
+ {ID_EFFECT_ALGO_IMEDIA_VOIP_SPKOUT, "IMEDIA_VOIP_SPKOUT"},
+ {ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_MICIN, "IMEDIA_VOICE_CALL_MICIN"},
+ {ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_SPKOUT, "IMEDIA_VOICE_CALL_SPKOUT"},
+ {ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_SPKOUT_BWE,
+ "IMEDIA_VOICE_CALL_SPKOUT_BWE"},
+};
+
+static void hifi_om_voice_bsd_work_handler(struct work_struct *work);
+static void hifi_om_show_audio_detect_info(struct work_struct *work);
+
+static struct hifi_om_work_info work_info[] = {
+ {HIFI_OM_WORK_VOICE_BSD, "hifi_om_work_voice_bsd",
+ hifi_om_voice_bsd_work_handler, {0} },
+ {HIFI_OM_WORK_AUDIO_OM_DETECTION, "hifi_om_work_audio_om_detect",
+ hifi_om_show_audio_detect_info, {0} },
+};
+
+extern struct dsm_client *dsm_audio_client;
+
+static void hifi_get_time_stamp(char *timestamp_buf, unsigned int len)
+{
+ struct timeval tv = { 0 };
+ struct rtc_time tm = { 0 };
+
+ BUG_ON(NULL == timestamp_buf);
+
+ memset(&tv, 0, sizeof(struct timeval));
+ memset(&tm, 0, sizeof(struct rtc_time));
+
+ do_gettimeofday(&tv);
+ tv.tv_sec -= sys_tz.tz_minuteswest * 60;
+ rtc_time_to_tm(tv.tv_sec, &tm);
+
+ snprintf(timestamp_buf, len, "%04d%02d%02d%02d%02d%02d",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ return;
+}
+
+static int hifi_chown(char *path, uid_t user, gid_t group)
+{
+ int ret = 0;
+ mm_segment_t old_fs;
+
+ if (NULL == path)
+ return -1;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = (int)sys_chown((const char __user *)path, user, group);
+ if (ret) {
+ loge("chown %s uid [%d] gid [%d] failed error [%d]!\n", path,
+ user, group, ret);
+ }
+
+ set_fs(old_fs);
+
+ return ret;
+}
+
+static int hifi_create_dir(char *path)
+{
+ int fd = -1;
+ mm_segment_t old_fs = 0;
+
+ if (!path) {
+ loge("path(%pK) is invailed\n", path);
+ return -1;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ fd = sys_access(path, 0);
+ if (0 != fd) {
+ logi("need create dir %s.\n", path);
+ fd = sys_mkdir(path, HIFI_OM_DIR_LIMIT);
+ if (fd < 0) {
+ set_fs(old_fs);
+ loge("create dir %s fail, ret: %d.\n", path, fd);
+ return fd;
+ }
+ logi("create dir %s successed, fd: %d.\n", path, fd);
+
+ /* hifi_log dir limit root-system */
+ if (hifi_chown(path, ROOT_UID, SYSTEM_GID)) {
+ loge("chown %s failed!\n", path);
+ }
+ }
+
+ set_fs(old_fs);
+
+ return 0;
+}
+
+static int hifi_om_create_log_dir(char *path)
+{
+ char cur_path[HIFI_DUMP_FILE_NAME_MAX_LEN];
+ int index = 0;
+
+ if (!path || (strlen(path) + 1) > HIFI_DUMP_FILE_NAME_MAX_LEN) {
+ loge("path(%pK) is invailed\n", path);
+ return -1;
+ }
+
+ if (0 == sys_access(path, 0))
+ return 0;
+
+ memset(cur_path, 0, HIFI_DUMP_FILE_NAME_MAX_LEN);
+
+ if (*path != '/')
+ return -1;
+
+ cur_path[index++] = *path++;
+
+ while (*path != '\0') {
+ if (*path == '/') {
+ if (hifi_create_dir(cur_path)) {
+ loge("create dir %s failed\n", cur_path);
+ return -1;
+ }
+ }
+ cur_path[index++] = *path++;
+ }
+
+ return 0;
+
+}
+
+int hifi_om_get_voice_bsd_param(void __user *uaddr)
+{
+ int ret = OK;
+ int work_id = HIFI_OM_WORK_VOICE_BSD;
+ struct hifi_om_work *om_work = NULL;
+ unsigned char data[MAIL_LEN_MAX] = { '\0' };
+ unsigned int data_len = 0;
+ struct voice_bsd_param_hsm param;
+
+ memset(&param, 0, sizeof(param));
+ if (copy_from_user(&param, uaddr, sizeof(param))) {
+ loge("copy_from_user failed\n");
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!param.pdata) {
+ loge("user buffer is null\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&work_info[work_id].ctl.lock);
+ if (!list_empty(&work_info[work_id].ctl.list)) {
+ om_work =
+ list_entry(work_info[work_id].ctl.list.next,
+ struct hifi_om_work, om_node);
+
+ data_len = om_work->data_len;
+ memcpy(data, om_work->data, om_work->data_len);
+
+ list_del(&om_work->om_node);
+ kzfree(om_work);
+ }
+ spin_unlock_bh(&work_info[work_id].ctl.lock);
+
+ if (param.data_len < data_len) {
+ loge("userspace len(%u) is less than data_len(%u)\n",
+ param.data_len, data_len);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_to_user((void __user *)param.pdata, data, data_len)) {
+ loge("copy_to_user failed\n");
+ ret = -EFAULT;
+ goto exit;
+ }
+ logd("size(%u)copy to user success\n", data_len);
+
+ exit:
+
+ return ret;
+}
+
+static void hifi_om_voice_bsd_work_handler(struct work_struct *work)
+{
+ int retval = 0;
+ char *envp[2] = { "hifi_voice_bsd_param", NULL };
+
+ retval = kobject_uevent_env(&g_om_data.dev->kobj, KOBJ_CHANGE, envp);
+ if (retval) {
+ loge("send uevent failed, retval: %d\n", retval);
+ return;
+ }
+ logi("report uevent success\n");
+
+ return;
+}
+
+void hifi_om_rev_data_handle(int work_id, const unsigned char *addr,
+ unsigned int len)
+{
+ struct hifi_om_work *work = NULL;
+
+ if (!addr || 0 == len || len > MAIL_LEN_MAX) {
+ loge("addr is null or len is invaled, len: %u", len);
+ return;
+ }
+
+ work = kzalloc(sizeof(*work) + len, GFP_ATOMIC);
+ if (!work) {
+ loge("malloc size %zu failed\n", sizeof(*work) + len);
+ return;
+ }
+ memcpy(work->data, addr, len);
+ work->data_len = len;
+
+ spin_lock_bh(&work_info[work_id].ctl.lock);
+ list_add_tail(&work->om_node, &work_info[work_id].ctl.list);
+ spin_unlock_bh(&work_info[work_id].ctl.lock);
+
+ if (!queue_work
+ (work_info[work_id].ctl.wq, &work_info[work_id].ctl.work)) {
+ loge("work_id: %d, This work was already on the queue\n",
+ work_id);
+ }
+
+ return;
+}
+
+static void hifi_om_show_audio_detect_info(struct work_struct *work)
+{
+ int work_id = HIFI_OM_WORK_AUDIO_OM_DETECTION;
+ struct hifi_om_work *om_work = NULL;
+ unsigned char data[MAIL_LEN_MAX] = { '\0' };
+ unsigned int data_len = 0;
+ unsigned int hifi_msg_type = 0;
+ struct hifi_om_load_info_stru hifi_om_info;
+ struct hifi_om_effect_mcps_stru mcps_info;
+ struct hifi_om_update_buff_delay_info update_buff_delay_info;
+
+ spin_lock_bh(&work_info[work_id].ctl.lock);
+ if (!list_empty(&work_info[work_id].ctl.list)) {
+ om_work =
+ list_entry(work_info[work_id].ctl.list.next,
+ struct hifi_om_work, om_node);
+
+ data_len = om_work->data_len;
+ memcpy(data, om_work->data, om_work->data_len);
+
+ list_del(&om_work->om_node);
+ kzfree(om_work);
+ }
+ spin_unlock_bh(&work_info[work_id].ctl.lock);
+
+ memset(&hifi_om_info, 0, sizeof(hifi_om_info));
+ memset(&mcps_info, 0, sizeof(mcps_info));
+ memset(&update_buff_delay_info, 0, sizeof(update_buff_delay_info));
+
+ hifi_msg_type = *(unsigned int *)data;
+
+ switch (hifi_msg_type) {
+ case HIFI_CPU_OM_LOAD_INFO:
+ if ((sizeof(hifi_om_info)) != data_len) {
+ logw("unavailable data from hifi, data_len: %u\n",
+ data_len);
+ return;
+ }
+ memcpy(&hifi_om_info, data, sizeof(hifi_om_info));
+
+ hifi_om_cpu_load_info_show(&hifi_om_info);
+ break;
+ case HIFI_CPU_OM_ALGO_MCPS_INFO:
+ if ((sizeof(mcps_info)) != data_len) {
+ logw("unavailable data from hifi, data_len: %u\n",
+ data_len);
+ return;
+ }
+ memcpy(&mcps_info, data, sizeof(mcps_info));
+
+ hifi_om_effect_mcps_info_show(&mcps_info);
+ break;
+ case HIFI_CPU_OM_UPDATE_BUFF_DELAY_INFO:
+ if ((sizeof(update_buff_delay_info)) != data_len) {
+ logw("unavailable data from hifi, data_len: %u\n",
+ data_len);
+ return;
+ }
+ memcpy(&update_buff_delay_info, data,
+ sizeof(update_buff_delay_info));
+
+ hifi_om_update_buff_delay_info_show(&update_buff_delay_info);
+ break;
+ default:
+ logi("type(%d), not support\n", hifi_msg_type);
+ break;
+ }
+
+ return;
+}
+
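+/*
+ * Copy the DSP log buffer (up to *dsp_log_cur_addr bytes) into the user
+ * buffer described by misc_io_dump_buf_param; when the 'clear' flag is set
+ * the log write pointer is reset and the buffer zeroed afterwards. Returns
+ * the number of bytes copied.
+ */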
+int hifi_get_dmesg(void __user *arg)
+{
+ int ret = OK;
+ unsigned int len = 0;
+ struct misc_io_dump_buf_param dump_info;
+ void __user *dump_info_user_buf = NULL;
+
+ len = (unsigned int)(*g_om_data.dsp_log_cur_addr);
+ if (len > DRV_DSP_UART_TO_MEM_SIZE) {
+ loge("len is larger: %d(%d), don't dump log\n", len,
+ DRV_DSP_UART_TO_MEM_SIZE);
+ return 0;
+ }
+
+ if (copy_from_user
+ (&dump_info, arg, sizeof(struct misc_io_dump_buf_param))) {
+ loge("copy_from_user fail, don't dump log\n");
+ return 0;
+ }
+
+ if (dump_info.buf_size == 0) {
+ loge("input buf size is zero, don't dump log\n");
+ return 0;
+ }
+
+ if (len > dump_info.buf_size) {
+ logw("input buf size smaller, input buf size: %d, log size: %d, contiue to dump using smaller size\n", dump_info.buf_size, len);
+ len = dump_info.buf_size;
+ }
+
+ dump_info_user_buf =
+ INT_TO_ADDR(dump_info.user_buf_l, dump_info.user_buf_h);
+ if (!dump_info_user_buf) {
+ loge("input dump buff addr is null\n");
+ return 0;
+ }
+ logi("get msg: len:%d from:%pK to:%pK.\n", len,
+ s_dsp_dump_info[0].data_addr, dump_info_user_buf);
+
+ s_dsp_dump_info[0].data_addr = g_om_data.dsp_log_addr;
+
+ ret =
+ copy_to_user(dump_info_user_buf, s_dsp_dump_info[0].data_addr, len);
+ if (OK != ret) {
+ loge("copy_to_user fail: %d\n", ret);
+ len -= ret;
+ }
+
+ if (dump_info.clear) {
+ *g_om_data.dsp_log_cur_addr = DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
+ if (s_dsp_dump_info[0].data_len > DRV_DSP_UART_TO_MEM_SIZE) {
+ loge("s_dsp_dump_info[0].data_len is larger than DRV_DSP_UART_TO_MEM_SIZE\n");
+ len = 0;
+ } else {
+ memset(s_dsp_dump_info[0].data_addr, 0,
+ s_dsp_dump_info[0].data_len);
+ }
+ }
+
+ return (int)len;
+}
+
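+/*
+ * Dump one of the s_dsp_dump_info[] regions (log, bin, OCRAM or TCM backup)
+ * to a file under LOG_PATH_HIFI_LOG. Log dumps are prefixed with a
+ * timestamp, the DSP exception number and the panic/exception marker.
+ */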
+static void hifi_dump_dsp(DUMP_DSP_INDEX index)
+{
+ int ret = 0;
+
+ mm_segment_t fs = 0;
+ struct file *fp = NULL;
+ int file_flag = O_RDWR;
+ struct kstat file_stat;
+ int write_size = 0;
+ unsigned int err_no = 0xFFFFFFFF;
+
+ char tmp_buf[64] = { 0 };
+ unsigned long tmp_len = 0;
+ struct rtc_time cur_tm;
+ struct timespec now;
+
+ char path_name[HIFI_DUMP_FILE_NAME_MAX_LEN] = { 0 };
+ char *file_name = s_dsp_dump_info[index].file_name;
+ char *data_addr = NULL;
+ unsigned int data_len = s_dsp_dump_info[index].data_len;
+
+ char *is_panic = "i'm panic.\n";
+ char *is_exception = "i'm exception.\n";
+ char *not_panic = "i'm ok.\n";
+
+ memset(path_name, 0, HIFI_DUMP_FILE_NAME_MAX_LEN);
+
+ if (down_interruptible(&g_om_data.dsp_dump_sema) < 0) {
+ loge("acquire the semaphore error.\n");
+ return;
+ }
+
+ IN_FUNCTION;
+
+ hifi_get_log_signal();
+
+ s_dsp_dump_info[NORMAL_LOG].data_addr =
+ g_om_data.dsp_log_addr + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
+ s_dsp_dump_info[PANIC_LOG].data_addr =
+ g_om_data.dsp_log_addr + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
+
+ if (index == OCRAM_BIN) {
+ s_dsp_dump_info[index].data_addr =
+ (unsigned char *)ioremap_wc(HIFI_OCRAM_BASE_ADDR,
+ HIFI_IMAGE_OCRAMBAK_SIZE);
+ }
+ if (index == TCM_BIN) {
+ s_dsp_dump_info[index].data_addr =
+ (unsigned char *)ioremap_wc(HIFI_TCM_BASE_ADDR,
+ HIFI_IMAGE_TCMBAK_SIZE);
+ }
+
+ if (NULL == s_dsp_dump_info[index].data_addr) {
+ loge("dsp log ioremap fail.\n");
+ goto END;
+ }
+
+ data_addr = s_dsp_dump_info[index].data_addr;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = hifi_om_create_log_dir(LOG_PATH_HIFI_LOG);
+ if (0 != ret) {
+ goto END;
+ }
+
+ snprintf(path_name, HIFI_DUMP_FILE_NAME_MAX_LEN, "%s%s",
+ LOG_PATH_HIFI_LOG, file_name);
+
+ ret = vfs_stat(path_name, &file_stat);
+ if (ret < 0) {
+ logi("there isn't a dsp log file:%s, and need to create.\n",
+ path_name);
+ file_flag |= O_CREAT;
+ }
+
+ fp = filp_open(path_name, file_flag, HIFI_OM_FILE_LIMIT);
+ if (IS_ERR(fp)) {
+ loge("open file fail: %s.\n", path_name);
+ fp = NULL;
+ goto END;
+ }
+
+ /*write from file start */
+ vfs_llseek(fp, 0, SEEK_SET);
+
+ /*write file head */
+ if (DUMP_DSP_LOG == s_dsp_dump_info[index].dump_type) {
+ /*write dump log time */
+ now = current_kernel_time();
+ rtc_time_to_tm(now.tv_sec, &cur_tm);
+
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ tmp_len =
+ snprintf(tmp_buf, sizeof(tmp_buf),
+ "%04d-%02d-%02d %02d:%02d:%02d.\n",
+ cur_tm.tm_year + 1900, cur_tm.tm_mon + 1,
+ cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
+ cur_tm.tm_sec);
+ vfs_write(fp, tmp_buf, tmp_len, &fp->f_pos);
+
+ /*write exception no */
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ err_no = (unsigned int)(*(g_om_data.dsp_exception_no));
+ if (err_no != 0xFFFFFFFF) {
+ tmp_len =
+ snprintf(tmp_buf, sizeof(tmp_buf),
+ "the exception no: %u.\n", err_no);
+ } else {
+ tmp_len =
+ snprintf(tmp_buf, sizeof(tmp_buf), "%s",
+ "hifi is fine, just dump log.\n");
+ }
+
+ vfs_write(fp, tmp_buf, tmp_len, &fp->f_pos);
+
+ /*write error type */
+ if (0xdeadbeaf == *g_om_data.dsp_panic_mark) {
+ vfs_write(fp, is_panic, strlen(is_panic), &fp->f_pos);
+ } else if (0xbeafdead == *g_om_data.dsp_panic_mark) {
+ vfs_write(fp, is_exception, strlen(is_exception),
+ &fp->f_pos);
+ } else {
+ vfs_write(fp, not_panic, strlen(not_panic), &fp->f_pos);
+ }
+ }
+
+ /*write dsp info */
+ if ((write_size = vfs_write(fp, data_addr, data_len, &fp->f_pos)) < 0) {
+ loge("write file fail.\n");
+ }
+
+ /* hifi.log file limit root-system */
+ if (hifi_chown(path_name, ROOT_UID, SYSTEM_GID)) {
+ loge("chown %s failed!\n", path_name);
+ }
+
+ logi("write file size: %d.\n", write_size);
+
+ END:
+ if (fp) {
+ filp_close(fp, 0);
+ }
+ set_fs(fs);
+
+ if ((index == OCRAM_BIN || index == TCM_BIN)
+ && (NULL != s_dsp_dump_info[index].data_addr)) {
+ iounmap(s_dsp_dump_info[index].data_addr);
+ s_dsp_dump_info[index].data_addr = NULL;
+ }
+
+ hifi_release_log_signal();
+
+ up(&g_om_data.dsp_dump_sema);
+ OUT_FUNCTION;
+
+ return;
+}
+
+static debug_level_com s_debug_level_com[4] = { {'d', 3}, {'i', 2}, {'w', 1}, {'e', 0} };
+
+static unsigned int hifi_get_debug_level_num(char level_char)
+{
+ int i = 0;
+ int len = sizeof(s_debug_level_com) / sizeof(s_debug_level_com[0]);
+
+ for (i = 0; i < len; i++) {
+ if (level_char == s_debug_level_com[i].level_char) {
+ return s_debug_level_com[i].level_num;
+ }
+ }
+
+ return 2; /*info */
+}
+
+static char hifi_get_debug_level_char(char level_num)
+{
+ int i = 0;
+ int len = sizeof(s_debug_level_com) / sizeof(s_debug_level_com[0]);
+
+ for (i = 0; i < len; i++) {
+ if (level_num == s_debug_level_com[i].level_num) {
+ return s_debug_level_com[i].level_char;
+ }
+ }
+
+ return 'i'; /*info */
+}
+
+static void hifi_set_dsp_debug_level(unsigned int level)
+{
+ *(unsigned int *)g_om_data.dsp_debug_level_addr = level;
+}
+
+static ssize_t hifi_debug_level_show(struct file *file, char __user *buf,
+ size_t size, loff_t *data)
+{
+ char level_str[MAX_LEVEL_STR_LEN] = { 0 };
+
+ if (NULL == buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+
+ snprintf(level_str, MAX_LEVEL_STR_LEN, "debug level: %c.\n",
+ hifi_get_debug_level_char(g_om_data.debug_level));
+
+ return simple_read_from_buffer(buf, size, data, level_str,
+ strlen(level_str));
+}
+
+static ssize_t hifi_debug_level_store(struct file *file,
+ const char __user *buf, size_t size,
+ loff_t *data)
+{
+ ssize_t ret;
+ char level_str[MAX_LEVEL_STR_LEN] = { 0 };
+ loff_t pos = 0;
+
+ if (NULL == buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+ ret =
+ simple_write_to_buffer(level_str, MAX_LEVEL_STR_LEN - 1, &pos, buf,
+ size);
+ if (ret != size) {
+ loge("Input param buf read error, return value: %zd\n", ret);
+ return -EINVAL;
+ }
+
+ if (!strchr("diwe", level_str[0])) {
+ loge("Input param buf is error(valid: d,i,w,e): %s.\n",
+ level_str);
+ return -EINVAL;
+ }
+ if (level_str[1] != '\n') {
+ loge("Input param buf is error, last char is not \\n .\n");
+ return -EINVAL;
+ }
+
+ g_om_data.debug_level = hifi_get_debug_level_num(level_str[0]);
+ return size;
+}
+
+static const struct file_operations hifi_debug_proc_ops = {
+ .owner = THIS_MODULE,
+ .read = hifi_debug_level_show,
+ .write = hifi_debug_level_store,
+};
+
+#define HIKEY_MSG_HEAD_PROTECT_WORD 0xffff1234
+#define HIKEY_MSG_BODY_PROTECT_WORD 0xffff4321
+
+#define HIKEY_AP2DSP_MSG_QUEUE_ADDR HIFI_HIKEY_SHARE_ADDR
+#define HIKEY_AP2DSP_MSG_QUEUE_SIZE 0x1800
+#define HIKEY_DSP2AP_MSG_QUEUE_ADDR (HIKEY_AP2DSP_MSG_QUEUE_ADDR + HIKEY_AP2DSP_MSG_QUEUE_SIZE)
+#define HIKEY_DSP2AP_MSG_QUEUE_SIZE 0x1800
+
+
+
+static struct hikey_ap2dsp_msg_head *msg_head;
+
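+/*
+ * Read 'size' bytes from the DSP-to-AP circular message queue (located
+ * right after the AP-to-DSP queue in shared memory), wrapping read_pos
+ * back past the queue header when the end of the queue is reached.
+ */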
+static void hikey_ap_mailbox_read_queue(
+ struct hikey_ap2dsp_msg_head *queue_head,
+ char *data, unsigned int size)
+{
+ unsigned int size_to_bottom = 0;
+ struct hikey_ap2dsp_msg_head *hikey_msg_head = 0;
+
+ hikey_msg_head = (struct hikey_ap2dsp_msg_head *)((char *)(msg_head)+HIKEY_DSP2AP_MSG_QUEUE_SIZE);
+ size_to_bottom = (HIKEY_AP2DSP_MSG_QUEUE_SIZE - queue_head->read_pos);
+ if (size_to_bottom > size) {
+ memcpy(data, (char *)((char *)hikey_msg_head + queue_head->read_pos), size);
+ queue_head->read_pos += size;
+ } else {
+ memcpy(data, (char *)((char *)hikey_msg_head + queue_head->read_pos), size_to_bottom);
+ memcpy(data + size_to_bottom,
+ (char *)((char *)hikey_msg_head + sizeof(struct hikey_ap2dsp_msg_head)),
+ size - size_to_bottom);
+ queue_head->read_pos = sizeof(struct hikey_ap2dsp_msg_head) + (size - size_to_bottom);
+ }
+}
+int hikey_ap_mailbox_read(struct hikey_msg_with_content *hikey_msg)
+{
+ struct hikey_ap2dsp_msg_head *hikey_msg_head = 0;
+
+ if (!hikey_msg) {
+ loge("have no memory to save hikey msg\n");
+ return -1;
+ }
+
+ hikey_msg_head = (struct hikey_ap2dsp_msg_head *)((char *)(msg_head)+HIKEY_DSP2AP_MSG_QUEUE_SIZE);
+
+ if (hikey_msg_head->head_protect_word != HIKEY_MSG_HEAD_PROTECT_WORD) {
+ loge("hikey msg head protect word error,0x%x\n", hikey_msg_head->head_protect_word);
+ return -1;
+ }
+
+ hikey_ap_mailbox_read_queue(hikey_msg_head, (char *)hikey_msg, offsetof(struct hikey_ap2dsp_msg_body, msg_content));
+
+ if (hikey_msg->msg_info.msg_id == 0 || hikey_msg->msg_info.msg_len > HIKEY_AP_DSP_MSG_MAX_LEN) {
+ loge("msg id error:0x%x, or msg len error:%u\n",
+ hikey_msg->msg_info.msg_id,
+ hikey_msg->msg_info.msg_len);
+ return -1;
+ }
+
+ hikey_ap_mailbox_read_queue(hikey_msg_head, hikey_msg->msg_info.msg_content,
+ hikey_msg->msg_info.msg_len - offsetof(struct hikey_ap2dsp_msg_body, msg_content));
+
+ return 0;
+}
+
+
+static DECLARE_COMPLETION(msg_completion);
+
+void hikey_ap_msg_process(struct hikey_msg_with_content *hikey_msg)
+{
+ if (!hikey_msg) {
+ loge("hikey msg is null\n");
+ return;
+ }
+ switch (hikey_msg->msg_info.msg_id) {
+ case ID_AUDIO_AP_OM_CMD:
+ complete(&msg_completion);
+ break;
+ case ID_XAF_DSP_TO_AP:
+ hasData = true;
+ loge("msg id:%x\n", hikey_msg->msg_info.msg_id);
+ loge("id:%x\n", hikey_msg->msg_info.xf_dsp_msg.id);
+ loge("opcode:%x\n", hikey_msg->msg_info.xf_dsp_msg.opcode);
+ loge("length:%x\n", hikey_msg->msg_info.xf_dsp_msg.length);
+ loge("address:%lx\n", (unsigned long)hikey_msg->msg_info.xf_dsp_msg.address);
+ break;
+ default:
+ loge("unknown msg id:0x%x\n", hikey_msg->msg_info.msg_id);
+ break;
+ }
+
+ return;
+}
+
+
+static void hikey_init_share_mem(char *share_mem_addr, unsigned int share_mem_size)
+{
+ if (!share_mem_addr) {
+ loge("share memory is null\n");
+ return;
+ }
+
+ memset(share_mem_addr, 0, share_mem_size);
+ msg_head = (struct hikey_ap2dsp_msg_head *)share_mem_addr;
+ msg_head->head_protect_word = HIKEY_MSG_HEAD_PROTECT_WORD;
+ msg_head->msg_num = 0;
+ msg_head->read_pos = (unsigned int)sizeof(struct hikey_ap2dsp_msg_head);
+ msg_head->write_pos = msg_head->read_pos;
+}
+
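+/*
+ * Append a message to the AP-to-DSP circular queue in shared memory,
+ * wrapping write_pos past the queue header when the message crosses the
+ * end of the queue, then bump the pending message count.
+ */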
+static void hikey_ap2dsp_write_msg(struct hikey_ap2dsp_msg_body *hikey_msg)
+{
+ unsigned int size_to_bottom = 0;
+ unsigned int write_size = 0;
+
+ if (!msg_head) {
+ loge("hikey share memory not init\n");
+ return;
+ }
+
+ if (!hikey_msg) {
+ loge("msg is null\n");
+ return;
+ }
+
+ if (msg_head->head_protect_word != HIKEY_MSG_HEAD_PROTECT_WORD) {
+ loge("hikey msg head protect word error,0x%x\n",
+ msg_head->head_protect_word);
+ return;
+ }
+
+ write_size = hikey_msg->msg_len;
+ size_to_bottom = (HIKEY_DSP2AP_MSG_QUEUE_SIZE - msg_head->write_pos);
+
+ if (write_size >= HIKEY_DSP2AP_MSG_QUEUE_SIZE) {
+ loge("msg is too long\n");
+ return;
+ }
+
+ if (size_to_bottom > write_size) {
+ memcpy((char *)((char *)msg_head + msg_head->write_pos),
+ hikey_msg, write_size);
+ msg_head->write_pos += write_size;
+ } else {
+ memcpy((char *)((char *)msg_head + msg_head->write_pos),
+ hikey_msg, size_to_bottom);
+ memcpy((char *)((char *)msg_head +
+ sizeof(struct hikey_ap2dsp_msg_head)),
+ (char *)hikey_msg + size_to_bottom,
+ write_size - size_to_bottom);
+ msg_head->write_pos =
+ sizeof(struct hikey_ap2dsp_msg_head) + (write_size -
+ size_to_bottom);
+ }
+
+ msg_head->msg_num++;
+}
+
+/*Interrupt receiver */
+#define IPC_ACPU_INT_SRC_HIFI_MSG (1)
+#define K3_SYS_IPC_CORE_HIFI (4)
+typedef void (*VOIDFUNCCPTR)(unsigned int);
+static void _dsp_to_ap_ipc_irq_proc(void)
+{
+ struct hikey_msg_with_content hikey_msg;
+
+ memset(&hikey_msg, 0, sizeof(struct hikey_msg_with_content));
+ if (hikey_ap_mailbox_read(&hikey_msg)) {
+ loge("read msg error\n");
+ } else {
+ hikey_ap_msg_process(&hikey_msg);
+ }
+
+ /*clear interrupt */
+ DRV_k3IpcIntHandler_Autoack();
+}
+
+void ap_ipc_int_init(void)
+{
+ logi("Enter %s\n", __func__);
+ IPC_IntConnect(IPC_ACPU_INT_SRC_HIFI_MSG,
+ (VOIDFUNCCPTR)_dsp_to_ap_ipc_irq_proc,
+ IPC_ACPU_INT_SRC_HIFI_MSG);
+ IPC_IntEnable(IPC_ACPU_INT_SRC_HIFI_MSG);
+ logi("Exit %s\n", __func__);
+}
+
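+/*
+ * Forward an XAF proxy message to the DSP: copy the user-space payload
+ * into the shared music buffer, rewrite the address field to the
+ * DSP-visible location, queue the message and kick the HiFi mailbox.
+ */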
+int send_xaf_ipc_msg_to_dsp(struct xf_proxy_msg *xaf_msg)
+{
+ int ret;
+ unsigned char *music_buf = NULL;
+ struct hikey_ap2dsp_msg_body hikey_msg;
+
+ if (WARN_ON(xaf_msg->length > HIFI_MUSIC_DATA_SIZE))
+ return -EINVAL;
+
+ hikey_msg.msg_id = ID_XAF_AP_TO_DSP;
+ hikey_msg.msg_len = sizeof(hikey_msg);
+ hikey_msg.xf_dsp_msg = *xaf_msg;
+ hikey_msg.xf_dsp_msg.address = HIFI_MUSIC_DATA_LOCATION;
+
+ music_buf = (unsigned char *)ioremap_wc(HIFI_MUSIC_DATA_LOCATION, HIFI_MUSIC_DATA_SIZE);
+ if (!music_buf) {
+ loge("%s: couldn't map music buffer\n", __func__);
+ return -ENOMEM;
+ }
+ /* ...get proxy message from user-space */
+ if (copy_from_user(music_buf, (void __user *)xaf_msg->address, xaf_msg->length)) {
+ iounmap(music_buf);
+ loge("%s: couldn't copy buffer %p from user %p\n", __func__, music_buf, (void *)xaf_msg->address);
+ return -EINVAL;
+ }
+ iounmap(music_buf);
+ hikey_ap2dsp_write_msg(&hikey_msg);
+ ret = (int)mailbox_send_msg(MAILBOX_MAILCODE_ACPU_TO_HIFI_MISC, &hikey_msg, hikey_msg.msg_len);
+
+ return ret;
+}
+
+int read_xaf_ipc_msg_from_dsp(void *buf, unsigned int size)
+{
+ int ret = OK;
+ return ret;
+}
+
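+/*
+ * Pack a NUL-terminated command string into a hikey_ap2dsp_msg_body,
+ * place it in the shared-memory queue and ring the HiFi core over IPC.
+ */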
+static int hifi_send_str_todsp(const char *cmd_str, size_t size)
+{
+ int ret = OK;
+ struct hikey_ap2dsp_msg_body *hikey_msg = kmalloc(sizeof(*hikey_msg) + size, GFP_KERNEL);
+
+ if (!hikey_msg)
+ return -ENOMEM;
+ BUG_ON(cmd_str == NULL);
+
+ hikey_msg->msg_id = ID_AP_AUDIO_STR_CMD;
+ hikey_msg->msg_len = offsetof(struct hikey_ap2dsp_msg_body, msg_content) + size + 1;
+ memcpy(&hikey_msg->msg_content, cmd_str, size);
+ hikey_msg->msg_content[size] = 0;
+
+ hikey_ap2dsp_write_msg(hikey_msg);
+ ret = (int)IPC_IntSend(K3_SYS_IPC_CORE_HIFI, 0);
+ kfree(hikey_msg);
+ return ret;
+}
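+/*
+ * Round-trip a PCM buffer through the DSP: copy it into the shared music
+ * buffer, ask the DSP to run "pcm_gain" on it, wait for the completion
+ * signalled from the DSP-to-AP mailbox, then copy the processed samples
+ * from the play buffer back to user space.
+ */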
+int send_pcm_data_to_dsp(void __user *buf, unsigned int size)
+{
+ char cmd[40];
+ int ret = OK;
+ unsigned char *music_buf = NULL;
+ unsigned char *pcm_buf = NULL;
+
+ if (WARN_ON(size > HIFI_MUSIC_DATA_SIZE))
+ return -EINVAL;
+
+ music_buf = (unsigned char *)ioremap_wc(HIFI_MUSIC_DATA_LOCATION, HIFI_MUSIC_DATA_SIZE);
+ if (!music_buf) {
+ loge("%s: couldn't map music buffer\n", __func__);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(music_buf, buf, size);
+ iounmap(music_buf);
+ if (ret) {
+ loge("%s: couldn't copy buffer %p from user %p\n", __func__, music_buf, buf);
+ return -EINVAL;
+ }
+
+ sprintf(cmd, "pcm_gain 0x%08x 0x%08x", HIFI_MUSIC_DATA_LOCATION, size);
+ ret = hifi_send_str_todsp(cmd, strlen(cmd));
+
+ if (ret < 0) {
+ loge("%s: couldn't send message to DSP\n", __func__);
+ return ret;
+ }
+
+ wait_for_completion(&msg_completion);
+ pcm_buf = (unsigned char *)ioremap_wc(PCM_PLAY_BUFF_LOCATION, PCM_PLAY_BUFF_SIZE);
+ if (!pcm_buf) {
+ loge("%s: couldn't map pcm buffer\n", __func__);
+ return -ENOMEM;
+ }
+ ret = copy_to_user(buf, pcm_buf, size);
+
+ if (ret != 0) {
+ ret = -EINVAL;
+ loge("%s: couldn't copy buffer %p to user %p\n", __func__, pcm_buf, buf);
+ }
+
+ iounmap(pcm_buf);
+ return ret;
+
+}
+
+static ssize_t hifi_dsp_fault_inject_show(struct file *file, char __user *buf,
+ size_t size, loff_t *data)
+{
+ char useage[] =
+ "Usage:\necho \"test_case param1 param2 ...\" > dspfaultinject\n"
+ "test_case may be:\n" "read_mem addr\n" "write_mem addr value\n"
+ "endless_loop\n"
+ "overload [type(1:RT 2:Normal 3:Low else:default)]\n" "auto_reset\n"
+ "param_test\n" "exit\n"
+ "dyn_mem_leak [size count type(1:DDR 2:TCM)]\n"
+ "dyn_mem_overlap [type(1:DDR 2:TCM)]\n"
+ "stack_overflow [type(1:RT 2:Nomal 3:Low else:default)]\n"
+ "isr_ipc\n" "om_report_ctrl [type(on off status)]\n"
+ "performance_leak [count]\n" "mailbox_overlap\n" "msg_leak\n"
+ "msg_deliver_err\n" "isr_malloc\n" "power_off\n" "power_on\n"
+ "watchdog_timeout\n" "ulpp_ddr\n" "mailbox_full\n"
+ "repeat_start [count]\n" "stop_dma\n"
+ "set_codec [codec_type(enum VCVOICE_TYPE_ENUM 0-9)]\n"
+ "voice_abnormal [str(all set_codec bad_frame dtx_state gcell_quality ucell_quality)]\n"
+ "diagnose_apkreport [cause]\n" "policy_null [target_index]\n"
+ "performance_check [count]\n"
+ "dma_adjust [str(in out) len(0-16000)]\n"
+ "trigger_vqi [str(ber fer)]\n"
+ "fifo_vp_test [str(start_once start_period stop)]\n"
+ "volte_encrypt [str(on off)]\n" "timer_start [timer_count]\n"
+ "test_timer [str(dump_num dump_node dump_blk dump_blk normal_oneshot normal_loop normal_clear high_oneshot high_loop high_clear callback_release)]\n"
+ "cdma_test [str(dl)]\n" "modem_loop [str(on off)]\n"
+ "rtp_cn_package [str(on off)]\n" "usbvoice_stop_audio_micin\n"
+ "usbvoice_stop_audio_spkout\n" "usbvoice_start_audio_micin\n"
+ "usbvoice_start_audio_spkout\n" "usbvoice_stop_voice_tx\n"
+ "usbvoice_stop_voice_rx\n" "usbvoice_start_voice_tx\n"
+ "usbvoice_start_voice_rx\n" "apk_trigger [str(start stop)]\n"
+ "whistle\n" "offload_effect_switch [str(on off)]\n" "\n";
+
+ if (!buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(buf, size, data, useage, strlen(useage));
+}
+
+#define FAULT_INJECT_CMD_STR_MAX_LEN 200
+static ssize_t hifi_dsp_fault_inject_store(struct file *file,
+ const char __user *buf, size_t size,
+ loff_t *data)
+{
+ ssize_t len = 0;
+ loff_t pos = 0;
+ char cmd_str[FAULT_INJECT_CMD_STR_MAX_LEN] = { 0 };
+
+ if (!buf) {
+ loge("param buf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (size > FAULT_INJECT_CMD_STR_MAX_LEN) {
+ loge("input size(%zd) is larger than max-len(%d)\n", size,
+ FAULT_INJECT_CMD_STR_MAX_LEN);
+ return -EINVAL;
+ }
+
+ memset(cmd_str, 0, sizeof(cmd_str));
+ len =
+ simple_write_to_buffer(cmd_str, (FAULT_INJECT_CMD_STR_MAX_LEN - 1),
+ &pos, buf, size);
+ if (len != size) {
+ loge("write to buffer fail: %zd\n", len);
+ return -EINVAL;
+ }
+
+ hifi_send_str_todsp(cmd_str, size);
+
+ return size;
+}
+
+static const struct file_operations hifi_dspfaultinject_proc_ops = {
+ .owner = THIS_MODULE,
+ .read = hifi_dsp_fault_inject_show,
+ .write = hifi_dsp_fault_inject_store,
+};
+
+struct image_partition_table addr_remap_tables[] = {
+ {HIFI_RUN_DDR_REMAP_BASE, HIFI_RUN_DDR_REMAP_BASE + HIFI_RUN_SIZE,
+ HIFI_RUN_SIZE, HIFI_RUN_LOCATION},
+ {HIFI_TCM_PHY_BEGIN_ADDR, HIFI_TCM_PHY_END_ADDR, HIFI_TCM_SIZE,
+ HIFI_IMAGE_TCMBAK_LOCATION},
+ {HIFI_OCRAM_PHY_BEGIN_ADDR, HIFI_OCRAM_PHY_END_ADDR, HIFI_OCRAM_SIZE,
+ HIFI_IMAGE_OCRAMBAK_LOCATION}
+};
+
+extern void *memcpy64(void *dst, const void *src, unsigned len);
+extern void *memcpy128(void *dst, const void *src, unsigned len);
+
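+/*
+ * Copy using the widest access the alignment allows: 16-byte LDP/STP
+ * pairs, then 8-byte loads/stores, then 32-bit words, and finally a
+ * byte-by-byte tail.
+ */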
+void *memcpy_aligned(void *_dst, const void *_src, unsigned len)
+{
+ unsigned char *dst = _dst;
+ const unsigned char *src = _src;
+ unsigned int length = len;
+ unsigned int cpy_len;
+
+ if (((unsigned long)dst % 16 == 0) && ((unsigned long)src % 16 == 0)
+ && (length >= 16)) {
+ cpy_len = length & 0xFFFFFFF0;
+ memcpy128(dst, src, cpy_len);
+ length = length % 16;
+ dst = dst + cpy_len;
+ src = src + cpy_len;
+
+ if (length == 0)
+ return _dst;
+ }
+
+ if (((unsigned long)dst % 8 == 0) && ((unsigned long)src % 8 == 0)
+ && (length >= 8)) {
+ cpy_len = length & 0xFFFFFFF8;
+ memcpy64(dst, src, cpy_len);
+ length = length % 8;
+ dst = dst + cpy_len;
+ src = src + cpy_len;
+ if (length == 0)
+ return _dst;
+ }
+
+ if (((unsigned long)dst % 4 == 0) && ((unsigned long)src % 4 == 0)) {
+ cpy_len = length >> 2;
+ while (cpy_len-- > 0) {
+ *(unsigned int *)dst = *(unsigned int *)src;
+ dst += 4;
+ src += 4;
+ }
+ length = length % 4;
+ if (length == 0)
+ return _dst;
+ }
+
+ while (length-- > 0)
+ *dst++ = *src++;
+
+ return _dst;
+}
+
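+/*
+ * Load hifi/hifi.img with request_firmware, walk its section table,
+ * translate each destination address through addr_remap_tables and copy
+ * every loadable, non-BSS section into the remapped region.
+ */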
+int load_hifi_img_by_misc(void)
+{
+ unsigned int i = 0;
+ char *img_buf = NULL;
+ struct drv_hifi_image_head *hifi_img = NULL;
+ const struct firmware *hifi_firmware;
+
+ if (g_om_data.dsp_loaded == true)
+ return 0;
+
+ loge("load hifi image now\n");
+
+ if (request_firmware(&hifi_firmware, "hifi/hifi.img", g_om_data.dev) < 0) {
+ loge("could not find firmware file hifi/hifi.img\n");
+ return -ENOENT;
+ }
+
+ img_buf = (char *)hifi_firmware->data;
+
+ hifi_img = (struct drv_hifi_image_head *)img_buf;
+ logi("sections_num:%u, image_size:%u\n", hifi_img->sections_num,
+ hifi_img->image_size);
+
+ for (i = 0; i < hifi_img->sections_num; i++) {
+ unsigned int index = 0;
+ unsigned long remap_dest_addr = 0;
+
+ logi("sections_num:%u, i:%u\n", hifi_img->sections_num, i);
+ logi("des_addr:0x%x, load_attib:%u, size:%u, sn:%hu, src_offset:%x, type:%u\n", hifi_img->sections[i].des_addr, hifi_img->sections[i].load_attib, hifi_img->sections[i].size, hifi_img->sections[i].sn, hifi_img->sections[i].src_offset, hifi_img->sections[i].type);
+
+ remap_dest_addr = (unsigned long)hifi_img->sections[i].des_addr;
+ if (remap_dest_addr >= HIFI_OCRAM_PHY_BEGIN_ADDR
+ && remap_dest_addr <= HIFI_OCRAM_PHY_END_ADDR) {
+ index = 2;
+ } else if (remap_dest_addr >= HIFI_TCM_PHY_BEGIN_ADDR
+ && remap_dest_addr <= HIFI_TCM_PHY_END_ADDR) {
+ index = 1;
+ } else { /*(remap_addr >= HIFI_DDR_PHY_BEGIN_ADDR && remap_addr <= HIFI_DDR_PHY_END_ADDR) */
+ index = 0;
+ }
+ remap_dest_addr -=
+ addr_remap_tables[index].phy_addr_start -
+ addr_remap_tables[index].remap_addr;
+
+ if (hifi_img->sections[i].type != DRV_HIFI_IMAGE_SEC_TYPE_BSS) {
+ unsigned int *iomap_dest_addr = NULL;
+
+ if (hifi_img->sections[i].load_attib ==
+ (unsigned char)DRV_HIFI_IMAGE_SEC_UNLOAD) {
+ logi("unload section\n");
+ continue;
+ }
+
+ iomap_dest_addr =
+ (unsigned int *)ioremap(remap_dest_addr,
+ hifi_img->sections[i].size);
+ if (!iomap_dest_addr) {
+ loge("ioremap failed\n");
+ release_firmware(hifi_firmware);
+ return -1;
+ }
+ memcpy_aligned((void *)(iomap_dest_addr),
+ (void *)((char *)hifi_img +
+ hifi_img->
+ sections[i].src_offset),
+ hifi_img->sections[i].size);
+ iounmap(iomap_dest_addr);
+ }
+ }
+
+ g_om_data.dsp_loaded = true;
+ release_firmware(hifi_firmware);
+ return 0;
+}
+
+#define RESET_OPTION_LEN 100
+
+static ssize_t hifi_dsp_reset_option_show(struct file *file, char __user *buf,
+ size_t size, loff_t *data)
+{
+ char reset_option[RESET_OPTION_LEN] = { 0 };
+
+ if (NULL == buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+ if (load_hifi_img_by_misc() == 0) {
+ g_om_data.dsp_loaded = true;
+ loge("g_om_data.dsp_loaded:%d\n", (int)g_om_data.dsp_loaded);
+ }
+ snprintf(reset_option, RESET_OPTION_LEN,
+ "reset_option: 0(reset mediasever) 1(reset system) current:%d.\n",
+ g_om_data.reset_system);
+
+ return simple_read_from_buffer(buf, size, data, reset_option,
+ strlen(reset_option));
+}
+
+static ssize_t hifi_dsp_reset_option_store(struct file *file,
+ const char __user *buf, size_t size,
+ loff_t *data)
+{
+ ssize_t ret;
+ char reset_option[RESET_OPTION_LEN] = { 0 };
+ loff_t pos = 0;
+
+ if (NULL == buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+ ret =
+ simple_write_to_buffer(reset_option, RESET_OPTION_LEN - 1, &pos,
+ buf, size);
+ if (ret != size) {
+ loge("Input param buf read error, return value: %zd\n", ret);
+ return -EINVAL;
+ }
+
+ if (!strchr("01", reset_option[0])) {
+ loge("Input param buf is error(valid: d,i,w,e): %s.\n",
+ reset_option);
+ return -EINVAL;
+ }
+ if (reset_option[1] != '\n') {
+ loge("Input param buf is error, last char is not \\n .\n");
+ return -EINVAL;
+ }
+
+ g_om_data.reset_system = (reset_option[0] == '0') ? false : true;
+ return size;
+}
+
+static const struct file_operations hifi_reset_option_proc_ops = {
+ .owner = THIS_MODULE,
+ .read = hifi_dsp_reset_option_show,
+ .write = hifi_dsp_reset_option_store,
+};
+
+static ssize_t hifi_dsp_dump_log_show(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ ssize_t ret = 0;
+
+ if (NULL == buf) {
+ loge("Input param buf is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = simple_read_from_buffer(buf, size, ppos,
+ LOG_PATH_HIFI_LOG,
+ (strlen(LOG_PATH_HIFI_LOG) + 1));
+ if (ret == (ssize_t) (strlen(LOG_PATH_HIFI_LOG) + 1)) {
+ g_om_data.force_dump_log = true;
+ up(&hifi_log_sema);
+ }
+
+ return ret;
+}
+
+static const struct file_operations hifi_dspdumplog_proc_ops = {
+ .owner = THIS_MODULE,
+ .read = hifi_dsp_dump_log_show,
+};
+
+static void create_hifidebug_proc_file(void)
+{
+ struct proc_dir_entry *ent_debuglevel;
+ struct proc_dir_entry *ent_dspdumplog;
+ struct proc_dir_entry *ent_dspfaultinject;
+ struct proc_dir_entry *poc_reset_option;
+
+ /* Creating read/write "status" entry */
+ ent_debuglevel =
+ proc_create(HIFIDEBUG_LEVEL_PROC_FILE, S_IRUGO | S_IWUSR | S_IWGRP,
+ hifi_debug_dir, &hifi_debug_proc_ops);
+ ent_dspdumplog =
+ proc_create(HIFIDEBUG_DSPDUMPLOG_PROC_FILE, S_IRUGO, hifi_debug_dir,
+ &hifi_dspdumplog_proc_ops);
+ if ((ent_debuglevel == NULL) || (ent_dspdumplog == NULL)) {
+ remove_proc_entry("hifidebug", 0);
+ loge("create proc file fail\n");
+ return;
+ }
+ ent_dspfaultinject =
+ proc_create(HIFIDEBUG_FAULTINJECT_PROC_FILE,
+ S_IRUGO | S_IWUSR | S_IWGRP, hifi_debug_dir,
+ &hifi_dspfaultinject_proc_ops);
+ if (ent_dspfaultinject == NULL) {
+ remove_proc_entry("hifidebug", 0);
+ loge("create proc file fail\n");
+ }
+
+ poc_reset_option =
+ proc_create(HIFIDEBUG_RESETOPTION_PROC_FILE,
+ S_IRUGO | S_IWUSR | S_IWGRP, hifi_debug_dir,
+ &hifi_reset_option_proc_ops);
+ if (poc_reset_option == NULL) {
+ remove_proc_entry("hifidebug", 0);
+ loge("create proc file fail\n");
+ }
+
+}
+
+static void remove_hifidebug_proc_file(void)
+{
+ remove_proc_entry(HIFIDEBUG_LEVEL_PROC_FILE, hifi_debug_dir);
+ remove_proc_entry(HIFIDEBUG_DSPDUMPLOG_PROC_FILE, hifi_debug_dir);
+ remove_proc_entry(HIFIDEBUG_FAULTINJECT_PROC_FILE, hifi_debug_dir);
+ remove_proc_entry(HIFIDEBUG_RESETOPTION_PROC_FILE, hifi_debug_dir);
+}
+
+static void hifi_create_procfs(void)
+{
+#ifdef ENABLE_HIFI_DEBUG
+ hifi_debug_dir = proc_mkdir(HIFIDEBUG_PATH, NULL);
+ if (hifi_debug_dir == NULL) {
+ loge("Unable to create /proc/hifidebug directory\n");
+ return;
+ }
+ create_hifidebug_proc_file();
+#endif
+}
+
+static void hifi_remove_procfs(void)
+{
+#ifdef ENABLE_HIFI_DEBUG
+ remove_hifidebug_proc_file();
+ remove_proc_entry("hifidebug", 0);
+#endif
+}
+
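+/*
+ * Dump worker, woken through hifi_log_sema: dumps panic log/bin when a
+ * new DSP exception is seen, otherwise a normal log (and bin) on the
+ * first run, on forced dumps, or when more than 10 s of DSP timestamps
+ * have passed since the last dump.
+ */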
+static int hifi_dump_dsp_thread(void *p)
+{
+#define HIFI_TIME_STAMP_1S 32768
+#define HIFI_DUMPLOG_TIMESPAN (10 * HIFI_TIME_STAMP_1S)
+
+ unsigned int exception_no = 0;
+ unsigned int time_now = 0;
+ unsigned int time_diff = 0;
+ unsigned int *hifi_info_addr = NULL;
+ unsigned int hifi_stack_addr = 0;
+ int i;
+
+ IN_FUNCTION;
+
+ while (!kthread_should_stop()) {
+ if (down_interruptible(&hifi_log_sema) != 0) {
+ loge("hifi_dump_dsp_thread wake up err.\n");
+ }
+ time_now = (unsigned int)readl(g_om_data.dsp_time_stamp);
+ time_diff = time_now - g_om_data.pre_dsp_dump_timestamp;
+ g_om_data.pre_dsp_dump_timestamp = time_now;
+ hifi_info_addr = g_om_data.dsp_stack_addr;
+
+ exception_no = *(unsigned int *)(hifi_info_addr + 3);
+ hifi_stack_addr = *(unsigned int *)(hifi_info_addr + 4);
+ logi("errno:%x pre_errno:%x is_first:%d is_force:%d time_diff:%d ms.\n", exception_no, g_om_data.pre_exception_no, g_om_data.first_dump_log, g_om_data.force_dump_log, (time_diff * 1000) / HIFI_TIME_STAMP_1S);
+
+ hifi_get_time_stamp(g_om_data.cur_dump_time,
+ HIFI_DUMP_FILE_NAME_MAX_LEN);
+
+ if (exception_no < 40
+ && (exception_no != g_om_data.pre_exception_no)) {
+ logi("panic addr:0x%x, cur_pc:0x%x, pre_pc:0x%x, cause:0x%x\n", *(unsigned int *)(hifi_info_addr), *(unsigned int *)(hifi_info_addr + 1), *(unsigned int *)(hifi_info_addr + 2), *(unsigned int *)(hifi_info_addr + 3));
+ for (i = 0;
+ i <
+ (DRV_DSP_STACK_TO_MEM_SIZE / 2) / sizeof(int) / 4;
+ i += 4) {
+ logi("0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (hifi_stack_addr + i * 4),
+ *(hifi_info_addr + i),
+ *(hifi_info_addr + 1 + i),
+ *(hifi_info_addr + 2 + i),
+ *(hifi_info_addr + i + 3));
+ }
+
+ hifi_dump_dsp(PANIC_LOG);
+ hifi_dump_dsp(PANIC_BIN);
+
+ g_om_data.pre_exception_no = exception_no;
+ } else if (g_om_data.first_dump_log || g_om_data.force_dump_log
+ || time_diff > HIFI_DUMPLOG_TIMESPAN) {
+ hifi_dump_dsp(NORMAL_LOG);
+ if (DSP_LOG_BUF_FULL != g_om_data.dsp_error_type) { /*needn't dump bin when hifi log buffer full */
+ hifi_dump_dsp(NORMAL_BIN);
+ }
+ g_om_data.first_dump_log = false;
+ }
+
+ hifi_info_addr = NULL;
+ }
+ OUT_FUNCTION;
+ return 0;
+}
+
+void hifi_dump_panic_log(void)
+{
+ if (!g_om_data.dsp_loaded) {
+ loge("hifi isn't loaded, errno: 0x%x .\n",
+ g_om_data.dsp_loaded_sign);
+ return;
+ }
+ up(&hifi_log_sema);
+ return;
+}
+
+static bool hifi_check_img_loaded(void)
+{
+ bool dsp_loaded = false;
+ g_om_data.dsp_loaded_sign = *(g_om_data.dsp_loaded_indicate_addr);
+
+ if (0xA5A55A5A == g_om_data.dsp_loaded_sign) {
+ loge("hifi img is not be loaded.\n");
+ } else if (g_om_data.dsp_loaded_sign > 0) {
+ loge("hifi img is loaded fail: 0x%x.\n",
+ g_om_data.dsp_loaded_sign);
+ } else {
+ logi("hifi img be loaded.\n");
+ dsp_loaded = true;
+ }
+
+ return dsp_loaded;
+}
+
+bool hifi_is_loaded(void)
+{
+ if (!g_om_data.dsp_loaded) {
+ loge("hifi isn't load, errno is 0x%x.\n",
+ g_om_data.dsp_loaded_sign);
+ }
+ return g_om_data.dsp_loaded;
+}
+
+int hifi_dsp_dump_hifi(void __user *arg)
+{
+ unsigned int err_type = 0;
+
+ if (!arg) {
+ loge("arg is null\n");
+ return -1;
+ }
+
+ if (copy_from_user(&err_type, arg, sizeof(err_type))) {
+ loge("copy_from_user fail, don't dump log\n");
+ return -1;
+ }
+ g_om_data.dsp_error_type = err_type;
+ g_om_data.force_dump_log = true;
+ up(&hifi_log_sema);
+
+ return 0;
+}
+
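+/*
+ * Map the DSP shared-memory control fields (log buffer, panic mark,
+ * exception number, stack snapshot, load indication, FAMA config),
+ * initialize the HiKey message queues, start the dump thread and create
+ * the debug procfs entries and OM workqueues.
+ */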
+void hifi_om_init(struct platform_device *pdev,
+ unsigned char *hifi_priv_base_virt,
+ unsigned char *hifi_priv_base_phy)
+{
+ int i = 0;
+ BUG_ON(NULL == pdev);
+
+ BUG_ON(NULL == hifi_priv_base_virt);
+ BUG_ON(NULL == hifi_priv_base_phy);
+
+ memset(&g_om_data, 0, sizeof(struct hifi_om_s));
+
+ g_om_data.dev = &pdev->dev;
+ g_om_data.debug_level = 2; /*info level */
+ g_om_data.reset_system = false;
+
+ g_om_data.dsp_time_stamp =
+ (unsigned int *)ioremap(SYS_TIME_STAMP_REG, 0x4);
+ if (NULL == g_om_data.dsp_time_stamp) {
+ pr_err("time stamp reg ioremap Error.\n"); /*can't use logx */
+ return;
+ }
+
+ IN_FUNCTION;
+
+ g_om_data.dsp_debug_level = 2; /*info level */
+ g_om_data.first_dump_log = true;
+
+ g_om_data.dsp_panic_mark =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_PANIC_MARK - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_bin_addr =
+ (char *)(hifi_priv_base_virt +
+ (HIFI_DUMP_BIN_ADDR - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_exception_no =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_EXCEPTION_NO - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_log_cur_addr =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_UART_TO_MEM_CUR_ADDR -
+ HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_log_addr =
+ (char *)(hifi_priv_base_virt +
+ (DRV_DSP_UART_TO_MEM - HIFI_UNSEC_BASE_ADDR));
+ *g_om_data.dsp_log_cur_addr = DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
+
+ g_om_data.dsp_debug_level_addr =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_UART_LOG_LEVEL - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_debug_kill_addr =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_KILLME_ADDR - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_fama_config =
+ (struct drv_fama_config *)(hifi_priv_base_virt +
+ (DRV_DSP_SOCP_FAMA_CONFIG_ADDR -
+ HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_stack_addr =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_STACK_TO_MEM - HIFI_UNSEC_BASE_ADDR));
+ g_om_data.dsp_loaded_indicate_addr =
+ (unsigned int *)(hifi_priv_base_virt +
+ (DRV_DSP_LOADED_INDICATE - HIFI_UNSEC_BASE_ADDR));
+
+ *(g_om_data.dsp_exception_no) = ~0;
+ g_om_data.pre_exception_no = ~0;
+ hikey_init_share_mem((char *)(hifi_priv_base_virt +
+ (HIFI_HIKEY_SHARE_MEM_ADDR -
+ HIFI_UNSEC_BASE_ADDR)),
+ (unsigned int)HIFI_HIKEY_SHARE_SIZE);
+ g_om_data.dsp_fama_config->head_magic = DRV_DSP_SOCP_FAMA_HEAD_MAGIC;
+ g_om_data.dsp_fama_config->flag = DRV_DSP_FAMA_OFF;
+ g_om_data.dsp_fama_config->rear_magic = DRV_DSP_SOCP_FAMA_REAR_MAGIC;
+
+ s_dsp_dump_info[NORMAL_BIN].data_addr = g_om_data.dsp_bin_addr;
+ s_dsp_dump_info[PANIC_BIN].data_addr = g_om_data.dsp_bin_addr;
+
+ g_om_data.dsp_loaded = hifi_check_img_loaded();
+ hifi_set_dsp_debug_level(g_om_data.dsp_debug_level);
+
+ sema_init(&g_om_data.dsp_dump_sema, 1);
+
+ g_om_data.kdumpdsp_task =
+ kthread_create(hifi_dump_dsp_thread, 0, "dspdumplog");
+ if (IS_ERR(g_om_data.kdumpdsp_task)) {
+ loge("creat hifi dump log thread fail.\n");
+ } else {
+ wake_up_process(g_om_data.kdumpdsp_task);
+ }
+
+ hifi_create_procfs();
+
+ for (i = 0; i < ARRAY_SIZE(work_info); i++) {
+ work_info[i].ctl.wq =
+ create_singlethread_workqueue(work_info[i].work_name);
+ if (!work_info[i].ctl.wq) {
+ pr_err("%s(%u):workqueue create failed!\n",
+ __FUNCTION__, __LINE__);
+ } else {
+ INIT_WORK(&work_info[i].ctl.work, work_info[i].func);
+ spin_lock_init(&work_info[i].ctl.lock);
+ INIT_LIST_HEAD(&work_info[i].ctl.list);
+ }
+ }
+
+ OUT_FUNCTION;
+ return;
+}
+
+void hifi_om_deinit(struct platform_device *dev)
+{
+ int i = 0;
+
+ IN_FUNCTION;
+
+ BUG_ON(NULL == dev);
+
+ up(&g_om_data.dsp_dump_sema);
+ kthread_stop(g_om_data.kdumpdsp_task);
+
+ if (NULL != g_om_data.dsp_time_stamp) {
+ iounmap(g_om_data.dsp_time_stamp);
+ g_om_data.dsp_time_stamp = NULL;
+ }
+
+ hifi_remove_procfs();
+
+ for (i = 0; i < ARRAY_SIZE(work_info); i++) {
+ if (work_info[i].ctl.wq) {
+ flush_workqueue(work_info[i].ctl.wq);
+ destroy_workqueue(work_info[i].ctl.wq);
+ work_info[i].ctl.wq = NULL;
+ }
+ }
+
+ OUT_FUNCTION;
+
+ return;
+}
+
+void hifi_om_cpu_load_info_show(struct hifi_om_load_info_stru *hifi_om_info)
+{
+ switch (hifi_om_info->info_type) {
+ case HIFI_CPU_LOAD_VOTE_UP:
+ case HIFI_CPU_LOAD_VOTE_DOWN:
+ logi("CpuUtilization:%d%%, Vote DDR to %dM\n",
+ hifi_om_info->cpu_load_info.cpu_load,
+ hifi_om_info->cpu_load_info.ddr_freq);
+ break;
+
+ case HIFI_CPU_LOAD_LACK_PERFORMANCE:
+ logw("DDRFreq: %dM, CpuUtilization:%d%%, Lack of performance!!!\n", hifi_om_info->cpu_load_info.ddr_freq, hifi_om_info->cpu_load_info.cpu_load);
+ /* report at most 16 times, once per 16 occurrences, to avoid flooding messages */
+ /* upload disabled because it only wastes resources:
+ if (unlikely((dsm_notify_limit <= 0x100) && (dsm_notify_limit & 0xF))) {
+ if (!dsm_client_ocuppy(dsm_audio_client)) {
+ dsm_client_record(dsm_audio_client, "DSM_SOC_HIFI_HIGH_CPU\n");
+ dsm_client_notify(dsm_audio_client, DSM_SOC_HIFI_HIGH_CPU);
+ }
+ }
+ dsm_notify_limit++;
+ */
+ break;
+
+ default:
+ break;
+ }
+}
+
+void hifi_om_effect_mcps_info_show(struct hifi_om_effect_mcps_stru
+ *hifi_mcps_info)
+{
+ unsigned int i;
+
+ logw("DDRFreq: %dM, CpuUtilization:%d%%\n",
+ hifi_mcps_info->cpu_load_info.ddr_freq,
+ hifi_mcps_info->cpu_load_info.cpu_load);
+
+ for (i = 0;
+ i <
+ (sizeof(hifi_mcps_info->effect_mcps_info) /
+ sizeof(hifi_effect_mcps_stru)); i++) {
+ if (hifi_mcps_info->effect_mcps_info[i].effect_algo_id <
+ ID_EFFECT_ALGO_BUTT
+ && hifi_mcps_info->effect_mcps_info[i].effect_algo_id >
+ ID_EFFECT_ALGO_START) {
+ switch (hifi_mcps_info->effect_mcps_info[i].
+ effect_stream_id) {
+ case AUDIO_STREAM_PCM_OUTPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: PCM_OUTPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_PLAYER_OUTPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: PLAYER_OUTPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_MIXER_OUTPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: MIXER_OUTPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_VOICE_OUTPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: VOICE_OUTPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_VOICEPP_OUTPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: VOICEPP_OUTPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_PCM_INPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: PCM_INPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_VOICE_INPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: VOICE_INPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ case AUDIO_STREAM_VOICEPP_INPUT:
+ logw("Algorithm: %s, Mcps: %d, Stream: VOICEPP_INPUT \n", effect_algo[hifi_mcps_info->effect_mcps_info[i].effect_algo_id - 1].effect_name, hifi_mcps_info->effect_mcps_info[i].effect_algo_mcps);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+}
+
+void hifi_om_update_buff_delay_info_show(struct hifi_om_update_buff_delay_info
+ *info)
+{
+
+ logw("Hifi continuous update play/capture buff delay : %d(0-play, 1-capture)\n", info->pcm_mode);
+}
diff --git a/drivers/hisi/hifi_dsp/hifi_om.h b/drivers/hisi/hifi_dsp/hifi_om.h
new file mode 100644
index 000000000000..4cbd10de7402
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/hifi_om.h
@@ -0,0 +1,405 @@
+/*
+ * hifi om.
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc.All rights reserved worldwide.
+ *
+ */
+
+#ifndef __HIFI_OM_H__
+#define __HIFI_OM_H__
+
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define HIFIDEBUG_PATH "hifidebug"
+#define LOG_PATH_HISI_LOGS "/data/hisi_logs/"
+#define LOG_PATH_RUNNING_TRACE "/data/hisi_logs/running_trace/"
+#define LOG_PATH_HIFI_LOG "/data/hisi_logs/running_trace/hifi_log/"
+#define LOG_PATH_BSD_LOG "/data/hisi_logs/running_trace/hifi_log/bsd_log/"
+#define FILE_NAME_DUMP_DSP_LOG "hifi.log"
+#define FILE_NAME_DUMP_DSP_BIN "hifi.bin"
+#define FILE_NAME_DUMP_DSP_PANIC_LOG "hifi_panic.log"
+#define FILE_NAME_DUMP_DSP_PANIC_BIN "hifi_panic.bin"
+#define FILE_NAME_DUMP_DSP_OCRAM_BIN "hifi_ocram.bin"
+#define FILE_NAME_DUMP_DSP_TCM_BIN "hifi_tcm.bin"
+
+#define HIFIDEBUG_PATH "hifidebug"
+#define HIFIDEBUG_LEVEL_PROC_FILE "debuglevel"
+#define HIFIDEBUG_DSPDUMPLOG_PROC_FILE "dspdumplog"
+#define HIFIDEBUG_FAULTINJECT_PROC_FILE "dspfaultinject"
+#define HIFIDEBUG_RESETOPTION_PROC_FILE "resetsystem"
+#define HIKEY_AP_DSP_MSG_MAX_LEN 100
+
+#ifndef LOG_TAG
+#define LOG_TAG "hifi_misc "
+#endif
+
+#define HIFI_DUMP_FILE_NAME_MAX_LEN 256
+#define DSP_DUMP_MAX_EFFECTS_CNT (10)
+#define ROOT_UID 0
+#define SYSTEM_GID 1000
+#define HIFI_OM_DIR_LIMIT 0750
+#define HIFI_OM_FILE_LIMIT 0640
+#define HIFI_OM_LOG_SIZE_MAX 0x400000 /* 4*1024*1024 = 4M */
+#define HIFI_OM_FILE_BUFFER_SIZE_MAX (1024)
+#define HIFI_SEC_MAX_NUM 64
+
+typedef enum {
+ DUMP_DSP_LOG,
+ DUMP_DSP_BIN
+} DUMP_DSP_TYPE;
+
+typedef enum {
+ DSP_NORMAL,
+ DSP_PANIC,
+ DSP_LOG_BUF_FULL
+} DSP_ERROR_TYPE;
+
+typedef enum {
+ NORMAL_LOG = 0,
+ NORMAL_BIN,
+ PANIC_LOG,
+ PANIC_BIN,
+ OCRAM_BIN,
+ TCM_BIN
+} DUMP_DSP_INDEX;
+
+enum HIFI_CPU_LOAD_INFO_ENUM {
+ HIFI_CPU_LOAD_REPORT = 0,
+ HIFI_CPU_LOAD_VOTE_UP,
+ HIFI_CPU_LOAD_VOTE_DOWN,
+ HIFI_CPU_LOAD_LACK_PERFORMANCE,
+ HIFI_CPU_LOAD_INFO_BUTT
+};
+
+enum HIFI_CPU_OM_INFO_ENUM {
+ HIFI_CPU_OM_LOAD_INFO = 0,
+ HIFI_CPU_OM_ALGO_MCPS_INFO,
+ HIFI_CPU_OM_UPDATE_BUFF_DELAY_INFO, /* audio pcm play or capture update buff delay */
+ HIFI_CPU_OM_INFO_BUTT
+};
+enum EFFECT_ALGO_ENUM {
+ ID_EFFECT_ALGO_START = 0,
+ ID_EFFECT_ALGO_FORMATER,
+ ID_EFFECT_ALGO_FORTE_VOICE_SPKOUT,
+ ID_EFFECT_ALGO_FORTE_VOICE_MICIN,
+ ID_EFFECT_ALGO_FORTE_VOICE_SPKOUT_BWE,
+ ID_EFFECT_ALGO_FORTE_VOIP_MICIN,
+ ID_EFFECT_ALGO_FORTE_VOIP_SPKOUT,
+ ID_EFFECT_ALGO_IN_CONVERT_I2S_GENERAL,
+ ID_EFFECT_ALGO_IN_CONVERT_I2S_HI363X,
+ ID_EFFECT_ALGO_INTERLACE,
+ ID_EFFECT_ALGO_OUT_CONVERT_I2S_GENERAL,
+ ID_EFFECT_ALGO_OUT_CONVERT_I2S_HI363X,
+ ID_EFFECT_ALGO_SWAP,
+ ID_EFFECT_ALGO_IMEDIA_WNR_MICIN,
+ ID_EFFECT_ALGO_IMEDIA_WNR_SPKOUT,
+ ID_EFFECT_ALGO_SWS_INTERFACE,
+ ID_EFFECT_ALGO_DTS,
+ ID_EFFECT_ALGO_DRE,
+ ID_EFFECT_ALGO_CHC,
+ ID_EFFECT_ALGO_SRC,
+ ID_EFFECT_ALGO_TTY,
+ ID_EFFECT_ALGO_KARAOKE_RECORD,
+ ID_EFFECT_ALGO_KARAOKE_PLAY,
+ ID_EFFECT_ALGO_MLIB_CS_VOICE_CALL_MICIN,
+ ID_EFFECT_ALGO_MLIB_CS_VOICE_CALL_SPKOUT,
+ ID_EFFECT_ALGO_MLIB_VOIP_CALL_MICIN,
+ ID_EFFECT_ALGO_MLIB_VOIP_CALL_SPKOUT,
+ ID_EFFECT_ALGO_MLIB_AUDIO_PLAY,
+ ID_EFFECT_ALGO_MLIB_AUDIO_RECORD,
+ ID_EFFECT_ALGO_MLIB_SIRI_MICIN,
+ ID_EFFECT_ALGO_MLIB_SIRI_SPKOUT,
+ ID_EFFECT_ALGO_EQ,
+ ID_EFFECT_ALGO_MBDRC6402,
+ ID_EFFECT_ALGO_IMEDIA_VOIP_MICIN,
+ ID_EFFECT_ALGO_IMEDIA_VOIP_SPKOUT,
+ ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_MICIN,
+ ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_SPKOUT,
+ ID_EFFECT_ALGO_IMEDIA_VOICE_CALL_SPKOUT_BWE,
+ ID_EFFECT_ALGO_BUTT
+};
+
+enum EFFECT_STREAM_ID {
+ AUDIO_STREAM_PCM_OUTPUT = 0,
+ AUDIO_STREAM_PLAYER_OUTPUT,
+ AUDIO_STREAM_MIXER_OUTPUT,
+ AUDIO_STREAM_VOICE_OUTPUT,
+ AUDIO_STREAM_VOICEPP_OUTPUT,
+ AUDIO_STREAM_OUTPUT_CNT,
+
+ AUDIO_STREAM_PCM_INPUT = 0x10,
+ AUDIO_STREAM_VOICE_INPUT,
+ AUDIO_STREAM_VOICEPP_INPUT,
+ AUDIO_STREAM_INPUT_CNT,
+};
+
+enum DRV_HIFI_IMAGE_SEC_LOAD_ENUM {
+ DRV_HIFI_IMAGE_SEC_LOAD_STATIC = 0,
+ DRV_HIFI_IMAGE_SEC_LOAD_DYNAMIC,
+ DRV_HIFI_IMAGE_SEC_UNLOAD,
+ DRV_HIFI_IMAGE_SEC_UNINIT,
+ DRV_HIFI_IMAGE_SEC_LOAD_BUTT,
+};
+typedef unsigned char DRV_HIFI_IMAGE_SEC_LOAD_ENUM_UINT8;
+
+enum DRV_HIFI_IMAGE_SEC_TYPE_ENUM {
+ DRV_HIFI_IMAGE_SEC_TYPE_CODE = 0,
+ DRV_HIFI_IMAGE_SEC_TYPE_DATA,
+ DRV_HIFI_IMAGE_SEC_TYPE_BSS,
+ DRV_HIFI_IMAGE_SEC_TYPE_BUTT,
+};
+typedef unsigned char DRV_HIFI_IMAGE_SEC_TYPE_ENUM_UINT8;
+
+struct drv_hifi_image_sec {
+ unsigned short sn;
+ DRV_HIFI_IMAGE_SEC_TYPE_ENUM_UINT8 type;
+ DRV_HIFI_IMAGE_SEC_LOAD_ENUM_UINT8 load_attib;
+ unsigned int src_offset;
+ unsigned int des_addr;
+ unsigned int size;
+};
+
+struct drv_hifi_image_head {
+ char time_stamp[24];
+ unsigned int image_size;
+ unsigned int sections_num;
+ struct drv_hifi_image_sec sections[HIFI_SEC_MAX_NUM];
+};
+
+struct image_partition_table {
+ unsigned long phy_addr_start;
+ unsigned long phy_addr_end;
+ unsigned int size;
+ unsigned long remap_addr;
+};
+
+
+struct hifi_om_s {
+ struct task_struct *kdumpdsp_task;
+ struct semaphore dsp_dump_sema;
+
+ unsigned int debug_level;
+ unsigned int dsp_debug_level;
+ unsigned int *dsp_debug_level_addr;
+
+ unsigned int pre_dsp_dump_timestamp;
+ unsigned int *dsp_time_stamp;
+ unsigned int pre_exception_no;
+ unsigned int *dsp_exception_no;
+
+ unsigned int *dsp_panic_mark;
+
+ unsigned int *dsp_log_cur_addr;
+ char *dsp_log_addr;
+ char *dsp_bin_addr;
+ char cur_dump_time[HIFI_DUMP_FILE_NAME_MAX_LEN];
+ bool first_dump_log;
+ bool force_dump_log;
+ DSP_ERROR_TYPE dsp_error_type;
+
+ bool dsp_loaded;
+ bool reset_system;
+ unsigned int dsp_loaded_sign;
+
+ unsigned int *dsp_debug_kill_addr;
+ unsigned int *dsp_stack_addr;
+ unsigned int *dsp_loaded_indicate_addr;
+ struct drv_fama_config *dsp_fama_config;
+
+ struct device *dev;
+
+};
+
+struct hifi_dsp_dump_info{
+ DSP_ERROR_TYPE error_type;
+ DUMP_DSP_TYPE dump_type;
+ char *file_name;
+ char *data_addr;
+ unsigned int data_len;
+};
+
+typedef struct {
+ unsigned short effect_stream_id;
+ unsigned short effect_algo_id;
+ unsigned int effect_algo_mcps;
+} hifi_effect_mcps_stru;
+
+typedef struct {
+ unsigned int cpu_load;
+ unsigned int avg_cpu_Load;
+ unsigned int ddr_freq;
+} hifi_cpu_load_info_stru;
+
+struct hifi_om_load_info_stru {
+ unsigned int recv_msg_type;
+ hifi_cpu_load_info_stru cpu_load_info;
+ unsigned int info_type;
+ unsigned int report_interval;
+};
+
+struct hifi_om_effect_mcps_stru {
+ unsigned int recv_msg_type;
+ hifi_cpu_load_info_stru cpu_load_info;
+ hifi_effect_mcps_stru effect_mcps_info[DSP_DUMP_MAX_EFFECTS_CNT];
+};
+
+struct hifi_om_update_buff_delay_info {
+ unsigned int recv_msg_type;
+ unsigned short reserved;
+ unsigned short pcm_mode;
+};
+
+extern struct hifi_om_s g_om_data;
+
+typedef struct {
+ char level_char;
+ unsigned int level_num;
+} debug_level_com;
+
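+/*
+ * Header placed at the start of each shared-memory message queue;
+ * read_pos and write_pos are byte offsets from the queue base and start
+ * just after this header.
+ */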
+struct hikey_ap2dsp_msg_head {
+ unsigned int head_protect_word;
+ unsigned int msg_num;
+ unsigned int read_pos;
+ unsigned int write_pos;
+};
+
+struct hikey_ap2dsp_msg_body {
+ unsigned short msg_id;
+ unsigned short msg_len; /*size of the whole message*/
+ union {
+ char msg_content[0];
+ struct xf_proxy_msg xf_dsp_msg;
+ };
+};
+
+struct hikey_msg_with_content {
+ struct hikey_ap2dsp_msg_body msg_info;
+ char msg_content[HIKEY_AP_DSP_MSG_MAX_LEN];
+};
+
+struct hifi_effect_info_stru {
+ unsigned int effect_id;
+ char effect_name[64];
+};
+
+/* voice bsd param hsm struct */
+ struct voice_bsd_param_hsm {
+ unsigned int data_len;
+ unsigned char *pdata;
+ };
+
+ enum hifi_om_work_id {
+ HIFI_OM_WORK_VOICE_BSD = 0,
+ HIFI_OM_WORK_AUDIO_OM_DETECTION,
+ HIFI_OM_WORK_MAX,
+ };
+
+ struct hifi_om_work_ctl {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ spinlock_t lock;
+ struct list_head list;
+ };
+
+ struct hifi_om_work_info {
+ int work_id;
+ char *work_name;
+ work_func_t func;
+ struct hifi_om_work_ctl ctl;
+ };
+
+ struct hifi_om_ap_data {
+ unsigned short msg_id;
+ unsigned int data_len;
+ unsigned char data[0];
+ };
+
+ struct hifi_om_work {
+ struct list_head om_node;
+ unsigned int data_len;
+ unsigned char data[0];
+ };
+
+#define HIFI_STAMP (unsigned int)readl(g_om_data.dsp_time_stamp)
+
+#define can_reset_system() \
+do {\
+ if (g_om_data.reset_system) {\
+ printk("soc hifi reset, reset all system by reset option");\
+ BUG_ON(true);\
+ } \
+} while (0);
+
+#define logd(fmt, ...) \
+do {\
+ if (g_om_data.debug_level >= 3) {\
+ printk(LOG_TAG"[D][%u]:%s:%d: "fmt, HIFI_STAMP, __FUNCTION__, __LINE__, ##__VA_ARGS__);\
+ } \
+} while (0);
+
+#define logi(fmt, ...) \
+do {\
+ if (g_om_data.debug_level >= 2) {\
+ printk(LOG_TAG"[I][%u]:%s:%d: "fmt, HIFI_STAMP, __FUNCTION__, __LINE__, ##__VA_ARGS__);\
+ } \
+} while (0);
+
+#define logw(fmt, ...) \
+do {\
+ if (g_om_data.debug_level >= 1) {\
+ printk(LOG_TAG"[W][%u]:%s:%d: "fmt, HIFI_STAMP, __FUNCTION__, __LINE__, ##__VA_ARGS__);\
+ } \
+} while (0);
+
+#define loge(fmt, ...) \
+do {\
+ printk(LOG_TAG"[E][%u]:%s:%d: "fmt, HIFI_STAMP, __FUNCTION__, __LINE__, ##__VA_ARGS__);\
+} while (0);
+
+#define IN_FUNCTION logd("begin.\n");
+#define OUT_FUNCTION logd("end.\n");
+
+ int load_hifi_img_by_misc(void);
+
+ void hifi_om_init(struct platform_device *dev, unsigned char *hifi_priv_base_virt, unsigned char *hifi_priv_base_phy);
+ void hifi_om_deinit(struct platform_device *dev);
+
+ int hifi_dsp_dump_hifi(void __user *arg);
+ void hifi_dump_panic_log(void);
+
+ bool hifi_is_loaded(void);
+ void ap_ipc_int_init(void);
+
+ void hifi_om_effect_mcps_info_show(struct hifi_om_effect_mcps_stru *hifi_mcps_info);
+ void hifi_om_cpu_load_info_show(struct hifi_om_load_info_stru *hifi_om_info);
+ void hifi_om_update_buff_delay_info_show(struct hifi_om_update_buff_delay_info *info);
+
+ int hifi_get_dmesg(void __user *arg);
+ int hifi_om_get_voice_bsd_param(void __user *uaddr);
+ void hifi_om_rev_data_handle(int type, const unsigned char *addr, unsigned int len);
+ int send_pcm_data_to_dsp(void __user *buf, unsigned int size);
+ struct xf_proxy_msg;
+ int send_xaf_ipc_msg_to_dsp(struct xf_proxy_msg *xaf_msg);
+ int read_xaf_ipc_msg_from_dsp(void *buf, unsigned int size);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif
diff --git a/drivers/hisi/hifi_dsp/memcpy_opt.S b/drivers/hisi/hifi_dsp/memcpy_opt.S
new file mode 100644
index 000000000000..261d2ce27feb
--- /dev/null
+++ b/drivers/hisi/hifi_dsp/memcpy_opt.S
@@ -0,0 +1,21 @@
+
+ .global memcpy128
+ .global memcpy64
+
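+/*
+ * memcpy128 copies x2 bytes from x1 to x0 in 16-byte LDP/STP pairs;
+ * memcpy64 is the 8-byte LDR/STR variant. Callers must pass aligned
+ * pointers and a non-zero multiple of the access size.
+ */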
+memcpy128:
+ add x2, x0, x2
+memcpy_align_128:
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ cmp x0, x2
+ b.ne memcpy_align_128
+ ret
+
+memcpy64:
+ add x2, x0, x2
+memcpy_align_64:
+ ldr x3, [x1], #8
+ str x3, [x0], #8
+ cmp x0, x2
+ b.ne memcpy_align_64
+ ret
diff --git a/drivers/hisi/hifi_mailbox/Kconfig b/drivers/hisi/hifi_mailbox/Kconfig
new file mode 100644
index 000000000000..c40c1b49b1ef
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/Kconfig
@@ -0,0 +1,3 @@
+
+source "drivers/hisi/hifi_mailbox/mailbox/Kconfig"
+source "drivers/hisi/hifi_mailbox/ipcm/Kconfig"
diff --git a/drivers/hisi/hifi_mailbox/Makefile b/drivers/hisi/hifi_mailbox/Makefile
new file mode 100644
index 000000000000..e1ffb39c7de7
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_HIFI_MAILBOX) += mailbox/
+obj-$(CONFIG_HIFI_IPC) += ipcm/
diff --git a/drivers/hisi/hifi_mailbox/ipcm/Kconfig b/drivers/hisi/hifi_mailbox/ipcm/Kconfig
new file mode 100644
index 000000000000..18408d3ba598
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/ipcm/Kconfig
@@ -0,0 +1,23 @@
+menu "Hifi mailbox driver"
+
+config HIFI_IPC
+ bool "k3 multicore ipc driver"
+ default n
+ help
+ k3 multicore ipc driver.
+config HIFI_IPC_3650
+ bool "hifi 3650 mailbox driver based on ipc"
+ default n
+ help
+ ported 3650 mailbox driver.
+config HIFI_IPC_3660
+ bool "hifi 3660 mailbox driver based on ipc"
+ default n
+ help
+ ported 3660 mailbox driver.
+config HIFI_IPC_6250
+ bool "hifi 6250 mailbox driver based on ipc"
+ default n
+ help
+ ported 6250 mailbox driver.
+endmenu
diff --git a/drivers/hisi/hifi_mailbox/ipcm/Makefile b/drivers/hisi/hifi_mailbox/ipcm/Makefile
new file mode 100644
index 000000000000..bda1d5986cdb
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/ipcm/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/hisi/hifi_mailbox/mailbox/
+
+obj-$(CONFIG_HIFI_IPC) := bsp_ipc.o
+
+
diff --git a/drivers/hisi/hifi_mailbox/ipcm/bsp_drv_ipc.h b/drivers/hisi/hifi_mailbox/ipcm/bsp_drv_ipc.h
new file mode 100644
index 000000000000..2edd1f4ba1c6
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/ipcm/bsp_drv_ipc.h
@@ -0,0 +1,125 @@
+
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc.All rights reserved worldwide.
+ *
+ */
+
+#ifndef _BSP_DRV_IPC_H_
+#define _BSP_DRV_IPC_H_
+
+#include <asm/io.h>
+#include "drv_comm.h"
+#include <linux/interrupt.h>
+
+#include "../mailbox/mdrv_ipc_enum.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ extern void __iomem *ipc_base;
+
+#define SIZE_4K (4096)
+#define BSP_IPC_BASE_ADDR (SOC_IPC_S_BASE_ADDR)
+#define IPC_REG_SIZE (SIZE_4K)
+
+#define BSP_RegRd(uwAddr) (0)
+#define BSP_RegWr(uwAddr, uwValue)
+
+#define SOC_IPC_CPU_INT_EN_ADDR(base, i) ((base) + (0x500+(0x10*(i))))
+#define SOC_IPC_SEM_INT_MASK_ADDR(base, j) ((base) + (0x604+(0x10*(j))))
+#define SOC_IPC_CPU_INT_CLR_ADDR(base, i) ((base) + (0x40C+(0x10*(i))))
+#define IRQ_IPC0_S (252)
+#define IRQ_IPC1_S (253)
+#define SOC_IPC_CPU_INT_EN_ADDR(base, i) ((base) + (0x500+(0x10*(i))))
+#define SOC_IPC_CPU_INT_DIS_ADDR(base, i) ((base) + (0x504+(0x10*(i))))
+#define SOC_IPC_CPU_INT_STAT_ADDR(base, i) ((base) + (0x408+(0x10*(i))))
+#define SOC_IPC_CPU_RAW_INT_ADDR(base, i) ((base) + (0x400+(0x10*(i))))
+#define SOC_IPC_CPU_INT_MASK_ADDR(base, i) ((base) + (0x404+(0x10*(i))))
+#define SOC_IPC_SEM_INT_CLR_ADDR(base, j) ((base) + (0x60C+(0x10*(j))))
+#define SOC_IPC_HS_CTRL_ADDR(base, j, k) ((base) + (0x800+(0x100*(j))+(0x8*(k))))
+#define SOC_IPC_SEM_INT_STAT_ADDR(base, j) ((base) + (0x608+(0x10*(j))))
+#define SOC_IPC_S_BASE_ADDR (0xe0475000)
+
+#define SOC_IPC_CPU_INT_MASK_DIS_ADDR(base, i) SOC_IPC_CPU_INT_EN_ADDR(base, i)
+#define SOC_IPC_CPU_INT_MASK_EN_ADDR(base, i) SOC_IPC_CPU_INT_DIS_ADDR(base, i)
+
+#define BSP_IPC_CPU_RAW_INT(i) (SOC_IPC_CPU_RAW_INT_ADDR((BSP_U32)ipc_base, i))
+#define BSP_IPC_CPU_INT_MASK(i) (SOC_IPC_CPU_INT_MASK_ADDR((BSP_U32)ipc_base, i))
+#define BSP_IPC_CPU_INT_STAT(i) (SOC_IPC_CPU_INT_STAT_ADDR((BSP_U32)ipc_base, i))
+#define BSP_IPC_CPU_INT_CLR(i) (SOC_IPC_CPU_INT_CLR_ADDR((BSP_U32)ipc_base, i))
+#define BSP_IPC_INT_MASK_EN(i) (SOC_IPC_CPU_INT_MASK_EN_ADDR((BSP_U32)ipc_base, i))
+#define BSP_IPC_INT_MASK_DIS(i) (SOC_IPC_CPU_INT_MASK_DIS_ADDR((BSP_U32)ipc_base, i))
+
+#define BSP_IPC_SEM_RAW_INT(j) (SOC_IPC_SEM_RAW_INT_ADDR((BSP_U32)ipc_base, j))
+#define BSP_IPC_SEM_INT_MASK(j) (SOC_IPC_SEM_INT_MASK_ADDR((BSP_U32)ipc_base, j))
+#define BSP_IPC_SEM_INT_STAT(j) (SOC_IPC_SEM_INT_STAT_ADDR((BSP_U32)ipc_base, j))
+#define BSP_IPC_SEM_INT_CLR(j) (SOC_IPC_SEM_INT_CLR_ADDR((BSP_U32)ipc_base, j))
+#define BSP_IPC_HS_CTRL(j, k) (SOC_IPC_HS_CTRL_ADDR((BSP_U32)ipc_base, j, k))
+#define BSP_IPC_HS_STAT(j, k) (SOC_IPC_HS_STAT_ADDR((BSP_U32)ipc_base, j, k))
+
+#define BSP_IPC_CPU_RAW_INT_ACPU (BSP_IPC_CPU_RAW_INT((BSP_U32)IPC_CORE_ACPU))
+#define BSP_IPC_CPU_INT_MASK_ACPU (BSP_IPC_CPU_INT_MASK((BSP_U32)IPC_CORE_ACPU))
+#define BSP_IPC_CPU_INT_STAT_ACPU (BSP_IPC_CPU_INT_STAT((BSP_U32)IPC_CORE_ACPU))
+#define BSP_IPC_CPU_INT_CLR_ACPU (BSP_IPC_CPU_INT_CLR((BSP_U32)IPC_CORE_ACPU))
+#define BSP_IPC_CPU_INT_MASK_EN_ACPU (BSP_IPC_INT_MASK_EN((BSP_U32)IPC_CORE_ACPU))
+#define BSP_IPC_CPU_INT_MASK_DIS_ACPU (BSP_IPC_INT_MASK_DIS((BSP_U32)IPC_CORE_ACPU))
+
+#define UCOM_COMM_UINT32_MAX (0xffffffff)
+#define BSP_IPC_MAX_INT_NUM (32)
+#define IPC_MASK 0xFFFFFF0F
+
+#define INT_LEV_IPC_CPU (IRQ_IPC0_S)
+#define INT_LEV_IPC_SEM (IRQ_IPC1_S)
+
+#define INT_VEC_IPC_SEM IVEC_TO_INUM(INT_LEV_IPC_SEM)
+#define INTSRC_NUM 32
+
+#define INT_VEC_IPC_CPU IVEC_TO_INUM(INT_LEV_IPC_CPU)
+#define IPC_CHECK_PARA(ulLvl) \
+ do {\
+ if (ulLvl >= INTSRC_NUM) {\
+ pr_warn("Wrong para , line:%d\n", __LINE__);\
+ return BSP_ERROR;\
+ } \
+ } while (0)
+
+ typedef struct tagIPC_DEV_S {
+ BSP_BOOL bInit;
+ } IPC_DEV_S;
+
+ typedef struct {
+ VOIDFUNCPTR routine;
+ BSP_U32 arg;
+ } BSP_IPC_ENTRY;
+
+ typedef struct tagIPC_DEBUG_E {
+ BSP_U32 u32RecvIntCore;
+ BSP_U32 u32IntHandleTimes[INTSRC_NUM];
+ BSP_U32 u32IntSendTimes[INTSRC_NUM];
+ BSP_U32 u32SemId;
+ BSP_U32 u32SemTakeTimes[INTSRC_NUM];
+ BSP_U32 u32SemGiveTimes[INTSRC_NUM];
+ } IPC_DEBUG_E;
+
+ BSP_S32 DRV_IPCIntInit(void);
+
+ BSP_S32 IPC_IntEnable(IPC_INT_LEV_E ulLvl);
+
+ BSP_S32 IPC_IntConnect(IPC_INT_LEV_E ulLvl, VOIDFUNCPTR routine,
+ BSP_U32 parameter);
+
+ BSP_S32 IPC_IntSend(IPC_INT_CORE_E enDstCore, IPC_INT_LEV_E ulLvl);
+
+ BSP_VOID IPC_SpinLock(BSP_U32 u32SignalNum);
+
+ BSP_VOID IPC_SpinUnLock (BSP_U32 u32SignalNum);
+ BSP_VOID IPC_SemGive_Ccore_All(BSP_VOID);
+ irqreturn_t DRV_k3IpcIntHandler_Autoack(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* end #define _BSP_IPC_H_ */
diff --git a/drivers/hisi/hifi_mailbox/ipcm/bsp_ipc.c b/drivers/hisi/hifi_mailbox/ipcm/bsp_ipc.c
new file mode 100644
index 000000000000..6f2f3378c098
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/ipcm/bsp_ipc.c
@@ -0,0 +1,462 @@
+/*
+ *
+ * Modifications made by Cadence Design Systems, Inc. 06/21/2017
+ * Copyright (C) 2017 Cadence Design Systems, Inc.All rights reserved worldwide.
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/string.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+
+#include <linux/slab.h>
+#include <linux/hisi/hisi_mailbox.h>
+#include <linux/hisi/hisi_rproc.h>
+
+#include "bsp_drv_ipc.h"
+
+/*#define USE_HIFI_IPC*/
+BSP_IPC_ENTRY stIpcIntTable[INTSRC_NUM];
+static IPC_DEV_S g_stIpcDev = { 0 };
+
+BSP_U32 g_CoreNum;
+
+struct semaphore g_semIpcTask[INTSRC_NUM];
+IPC_DEBUG_E g_stIpc_debug = { 0 };
+
+/* base address of ipc registers */
+void __iomem *ipc_base;
+spinlock_t g_ipc_int_lock = __SPIN_LOCK_UNLOCKED("ipc");
+
+#define USE_HISI_MAILBOX
+
+/*************************************k3 ipc******************************************/
+#define BIT_ENABLE(n) (1 << (n))
+#define BYTE_REF(address) (*((unsigned char volatile *) (address)))
+#define HALFWORD_REF(address) (*((unsigned short volatile *) (address)))
+#define WORD_REF(address) (*((unsigned int volatile *) (address)))
+#define WORD_PTR(address) (*((unsigned int volatile **) (address)))
+#define BYTE(address) volatile unsigned char __attribute__((section(".ARM.__at_"address)))
+#define HALFWORD(address) volatile unsigned short __attribute__((section(".ARM.__at_"address)))
+#define WORD(address) volatile unsigned int __attribute__((section(".ARM.__at_"address)))
+
+#define K3_IPC_MODE_ACK (7)
+#define K3_IPC_MODE_IDLE (4)
+#define K3_IPC_MODE_AUTOACK (0)
+
+/************************************* hifi system ipc ******************************************/
+#define K3_SYS_IPC_BASE_ADDR_S (unsigned long)(0xe896a000)
+#define K3_SYS_IPC_BASE_ADDR_NS (unsigned long)(0xe896b000)
+#define K3_SYS_IPC_REG_SIZE (0xA00)
+
+#define K3_IPC_LOCK(base) WORD_REF(base + 0xA00)
+#define K3_IPC_SOURCE(base, box) WORD_REF(base + ((box) * 64))
+#define K3_IPC_DEST(base, box) WORD_REF(base + ((box) * 64) + 0x04)
+#define K3_IPC_DCLR(base, box) WORD_REF(base + ((box) * 64) + 0x08)
+#define K3_IPC_DSTATUS(base, box) WORD_REF(base + ((box) * 64) + 0x0c)
+#define K3_IPC_MODE(base, box) WORD_REF(base + ((box) * 64) + 0x10)
+#define K3_IPC_IMASK(base, box) WORD_REF(base + ((box) * 64) + 0x14)
+#define K3_IPC_ICLR(base, box) WORD_REF(base + ((box) * 64) + 0x18)
+#define K3_IPC_SEND(base, box) WORD_REF(base + ((box) * 64) + 0x1c)
+#define K3_IPC_DATA(base, box, num) WORD_REF(base + ((box) * 64) + 0x20 + ((num) * 4))
+#define K3_IPC_CPUIMST(base, core) WORD_REF(base + 0x800 + ((core) * 8))
+#define K3_IPC_CPUIRST(base, core) WORD_REF(base + 0x804 + ((core) * 8))
+
+#define K3_SYS_IPC_CORE_LIT (0)
+#define K3_SYS_IPC_CORE_BIG (1)
+#define K3_SYS_IPC_CORE_IOM3 (2)
+#define K3_SYS_IPC_CORE_LPM3 (3)
+#define K3_SYS_IPC_CORE_HIFI (4)
+
+#define K3_HIFI_IPC_BASE_ADDR (unsigned long)(0xE804C000)
+#define K3_HIFI_IPC_REG_SIZE (0x1000)
+
+#define K3_ASP_CFG_CTRLDIS(base) WORD_REF(base + 0x04)
+#define K3_ASP_CFG_GATE_EN(base) WORD_REF(base + 0x0c)
+
+enum {
+ K3_HIFI_IPC_CORE_AP_LPM3_IOM3 = 0,
+ K3_HIFI_IPC_CORE_MODEM_A9 = 1,
+ K3_HIFI_IPC_CORE_MODEM_BBE = 2,
+ K3_HIFI_IPC_CORE_HIFI = 3,
+ K3_HIFI_IPC_CORE_IOM3 = 5,
+ K3_HIFI_IPC_CORE_AP = 6,
+ K3_HIFI_IPC_CORE_LPM3 = 8,
+};
+
+typedef enum {
+ K3_SEC_SYS_IPC = 0,
+ K3_UNSEC_SYS_IPC,
+ K3_HIFI_IPC,
+} K3_IPC;
+
+typedef struct {
+ K3_IPC ipcMode;
+ void __iomem *ipcBase;
+ int mailBoxNum;
+ int intNum;
+ int sourceCore;
+ int destCore;
+} K3_IPC_CONFIG;
+
+enum {
+ K3_IPC_CORE_IS_SEND = 0,
+ K3_IPC_CORE_IS_RECEIVE,
+ K3_IPC_CORE_IS_UNKNOEN,
+};
+
+static K3_IPC_CONFIG k3IpcConfig[K3_IPC_CORE_IS_UNKNOEN] = {
+ {
+ K3_UNSEC_SYS_IPC,
+ NULL,
+ 18,
+ 220,
+ K3_SYS_IPC_CORE_LIT,
+ K3_SYS_IPC_CORE_HIFI},
+
+ {
+ K3_UNSEC_SYS_IPC,
+ NULL,
+ 2,
+ 227,
+ K3_SYS_IPC_CORE_HIFI,
+ K3_SYS_IPC_CORE_BIG}
+};
+
+static int DRV_k3IpcIntHandler_ipc(struct notifier_block *nb, unsigned long len,
+ void *msg);
+struct hisi_mbox *hifi_tx_mbox;
+struct notifier_block rx_nb;
+
+#ifdef USE_HISI_MAILBOX
+#define MAX_SEND_IPC_TRY 3
+
+static int hisi_hifi_mbox_init(void)
+{
+ int ret = 0, rproc_id = 0;
+
+ rx_nb.next = NULL;
+ rx_nb.notifier_call = DRV_k3IpcIntHandler_ipc;
+ rproc_id = HISI_RPROC_HIFI_MBX2;
+ /* register the rx notify callback */
+ ret = RPROC_MONITOR_REGISTER(rproc_id, &rx_nb);
+ if (ret)
+ pr_info("%s:RPROC_MONITOR_REGISTER failed", __func__);
+
+ return ret;
+}
+
+/*
+static void hisi_hifi_mbox_exit(void)
+{
+ if (hifi_mbox)
+ hisi_mbox_put(&hifi_mbox);
+}
+*/
+#else
+static irqreturn_t DRV_k3IpcIntHandler_ack(int irq, void *dev_id)
+{
+ BSP_S32 retval = IRQ_HANDLED;
+ BSP_U32 u32IntStat = 0;
+
+ int myRole = K3_IPC_CORE_IS_SEND;
+ BSP_U32 mailBoxNum = k3IpcConfig[myRole].mailBoxNum;
+ BSP_U32 source = k3IpcConfig[myRole].sourceCore;
+ void __iomem *ipcBase = k3IpcConfig[myRole].ipcBase;
+
+ u32IntStat = K3_IPC_CPUIMST(ipcBase, source);
+
+ if (u32IntStat & BIT_ENABLE(mailBoxNum)) {
+ if (K3_IPC_MODE(ipcBase, mailBoxNum) &
+ BIT_ENABLE(K3_IPC_MODE_ACK)) {
+ pr_info("func:%s: Receive ack int\n", __func__);
+
+ K3_IPC_SOURCE(ipcBase, mailBoxNum) = BIT_ENABLE(source);
+ }
+ K3_IPC_DCLR(ipcBase, mailBoxNum) = BIT_ENABLE(mailBoxNum);
+ }
+ return (irqreturn_t)IRQ_RETVAL(retval);
+}
+
+#endif
+
+irqreturn_t DRV_k3IpcIntHandler_Autoack(void)
+{
+ BSP_S32 retval = IRQ_HANDLED;
+ BSP_U32 u32IntStat = 0;
+
+ int myRole = K3_IPC_CORE_IS_RECEIVE;
+ BSP_U32 mailBoxNum = k3IpcConfig[myRole].mailBoxNum;
+ BSP_U32 source = k3IpcConfig[myRole].sourceCore;
+ void __iomem *ipcBase = k3IpcConfig[myRole].ipcBase;
+
+ u32IntStat = K3_IPC_CPUIMST(ipcBase, source);
+
+ if (u32IntStat & BIT_ENABLE(mailBoxNum)) {
+ if (K3_IPC_MODE(ipcBase, mailBoxNum) & BIT_ENABLE(K3_IPC_MODE_AUTOACK)) {
+ K3_IPC_SOURCE(ipcBase, mailBoxNum) = BIT_ENABLE(source);
+ }
+ K3_IPC_DCLR(ipcBase, mailBoxNum) = BIT_ENABLE(mailBoxNum);
+ }
+ return (irqreturn_t)IRQ_RETVAL(retval);
+}
+
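+/*
+ * Map the IPC register windows for the send and receive roles, unlock each
+ * block with the 0x1ACCE551 key, and hook up the receive path: the HiSi
+ * mailbox notifier when USE_HISI_MAILBOX is set, a raw IRQ handler otherwise.
+ */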
+BSP_S32 DRV_IPCIntInit(void)
+{
+	int myRole = 0;
+#ifndef USE_HISI_MAILBOX
+	int ret = 0;
+#endif
+
+ printk(KERN_ERR "DRV_IPCIntInit begin.\n");
+
+ if (BSP_TRUE == g_stIpcDev.bInit) {
+ return BSP_OK;
+ }
+
+ printk(KERN_ERR "DRV_IPCIntInit line = %d\n", __LINE__);
+
+ g_CoreNum = IPC_CORE_ACPU;
+
+ memset((void *)stIpcIntTable, 0x0,
+ (INTSRC_NUM * sizeof(BSP_IPC_ENTRY)));
+
+ myRole = K3_IPC_CORE_IS_SEND;
+ if (K3_UNSEC_SYS_IPC == k3IpcConfig[myRole].ipcMode) {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_SYS_IPC_BASE_ADDR_NS, K3_SYS_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR
+ "line %d :k3 unsec sys ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ } else if (K3_SEC_SYS_IPC == k3IpcConfig[myRole].ipcMode) {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_SYS_IPC_BASE_ADDR_S, K3_SYS_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR
+ "line %d :k3 sec sys ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ } else {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_HIFI_IPC_BASE_ADDR, K3_HIFI_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR "line %d :k3 hifi ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ }
+
+ K3_IPC_LOCK(k3IpcConfig[myRole].ipcBase) = 0x1ACCE551;
+ myRole = K3_IPC_CORE_IS_RECEIVE;
+ if (K3_UNSEC_SYS_IPC == k3IpcConfig[myRole].ipcMode) {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_SYS_IPC_BASE_ADDR_NS, K3_SYS_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR
+ "line %d :k3 unsec sys ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ } else if (K3_SEC_SYS_IPC == k3IpcConfig[myRole].ipcMode) {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_SYS_IPC_BASE_ADDR_S, K3_SYS_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR
+ "line %d :k3 sec sys ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ } else {
+ k3IpcConfig[myRole].ipcBase =
+ ioremap(K3_HIFI_IPC_BASE_ADDR, K3_HIFI_IPC_REG_SIZE);
+ if (!k3IpcConfig[myRole].ipcBase) {
+ printk(KERN_ERR "line %d :k3 hifi ipc ioremap error.\n",
+ __LINE__);
+ return -1;
+ }
+ }
+ K3_IPC_LOCK(k3IpcConfig[myRole].ipcBase) = 0x1ACCE551;
+#ifdef USE_HISI_MAILBOX
+ hisi_hifi_mbox_init();
+#else
+ ret = request_irq(k3IpcConfig[K3_IPC_CORE_IS_SEND].intNum,
+ DRV_k3IpcIntHandler_ack, 0, "k3IpcIntHandler_ack",
+ NULL);
+ if (ret) {
+ printk(KERN_ERR
+ "BSP_DRV_IPCIntInit: Unable to register ipc irq ret=%d.\n",
+ ret);
+ return BSP_ERROR;
+ }
+ printk(KERN_ERR "BSP_DRV_IPCIntInit line = %d\n", __LINE__);
+#endif
+
+ g_stIpcDev.bInit = BSP_TRUE;
+
+ printk(KERN_ERR "BSP_DRV_IPCIntInit end.\n");
+
+ return BSP_OK;
+}
+
+BSP_S32 IPC_IntEnable(IPC_INT_LEV_E ulLvl)
+{
+ IPC_CHECK_PARA(ulLvl);
+
+ return BSP_OK;
+}
+
+BSP_S32 IPC_IntDisable(IPC_INT_LEV_E ulLvl)
+{
+ IPC_CHECK_PARA(ulLvl);
+
+ return BSP_OK;
+}
+
+BSP_S32 IPC_IntConnect(IPC_INT_LEV_E ulLvl, VOIDFUNCPTR routine,
+ BSP_U32 parameter)
+{
+
+ unsigned long flag = 0;
+
+ IPC_CHECK_PARA(ulLvl);
+
+ spin_lock_irqsave(&g_ipc_int_lock, flag);
+ stIpcIntTable[ulLvl].routine = routine;
+ stIpcIntTable[ulLvl].arg = parameter;
+ spin_unlock_irqrestore(&g_ipc_int_lock, flag);
+
+ return BSP_OK;
+}
+
+BSP_S32 IPC_IntDisonnect(IPC_INT_LEV_E ulLvl, VOIDFUNCPTR routine,
+ BSP_U32 parameter)
+{
+ unsigned long flag = 0;
+
+ IPC_CHECK_PARA(ulLvl);
+
+ spin_lock_irqsave(&g_ipc_int_lock, flag);
+ stIpcIntTable[ulLvl].routine = NULL;
+ stIpcIntTable[ulLvl].arg = 0;
+ spin_unlock_irqrestore(&g_ipc_int_lock, flag);
+
+ return BSP_OK;
+}
+
+static int DRV_k3IpcIntHandler_ipc(struct notifier_block *nb, unsigned long len,
+ void *msg)
+{
+ BSP_U32 newLevel = 0;
+ mbox_msg_t *_msg = (mbox_msg_t *) msg;
+
+ newLevel = _msg[0];
+
+ if (newLevel < INTSRC_NUM) {
+ g_stIpc_debug.u32IntHandleTimes[newLevel]++;
+
+ if (NULL != stIpcIntTable[newLevel].routine) {
+ stIpcIntTable[newLevel].routine(stIpcIntTable[newLevel].
+ arg);
+ }
+ }
+
+ return 0;
+}
+
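+/*
+ * Raise interrupt level ulLvl on the destination core. Mail for the HiFi core
+ * goes through the mailbox (either the HiSi mailbox framework or a directly
+ * programmed auto-ack mailbox); other cores are signalled through the raw
+ * IPC interrupt register.
+ */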
+BSP_S32 IPC_IntSend(IPC_INT_CORE_E enDstCore, IPC_INT_LEV_E ulLvl)
+{
+ int myRole = K3_IPC_CORE_IS_SEND;
+ BSP_U32 source = k3IpcConfig[myRole].sourceCore;
+
+#ifdef USE_HISI_MAILBOX
+ BSP_U32 ipcMsg[2];
+ int ret = 0, rproc_id = 0;
+#else
+ BSP_U32 mailBoxNum = k3IpcConfig[myRole].mailBoxNum;
+ BSP_U32 dest = k3IpcConfig[myRole].destCore;
+ void __iomem *ipcBase = k3IpcConfig[myRole].ipcBase;
+#endif
+
+ IPC_CHECK_PARA(ulLvl);
+
+ if (IPC_CORE_HiFi == enDstCore) {
+#ifdef USE_HISI_MAILBOX
+ ipcMsg[0] = (source << 24) | (ulLvl << 8);
+
+ rproc_id = HISI_RPROC_HIFI_MBX18;
+ ret = RPROC_ASYNC_SEND(rproc_id, (mbox_msg_t *) ipcMsg, 2);
+ if (ret) {
+ printk(" %s , line %d, send error\n", __func__,
+ __LINE__);
+ }
+#else
+ while (0 ==
+ (K3_IPC_MODE(ipcBase, mailBoxNum) &
+ BIT_ENABLE(K3_IPC_MODE_IDLE))) {
+ printk("func:%s: mailbox is busy mode = 0x%x\n",
+ __func__, K3_IPC_MODE(ipcBase, mailBoxNum));
+ }
+
+ K3_IPC_SOURCE(ipcBase, mailBoxNum) = BIT_ENABLE(source);
+ K3_IPC_DEST(ipcBase, mailBoxNum) = BIT_ENABLE(dest);
+
+ K3_IPC_IMASK(ipcBase, mailBoxNum) =
+ ~(BIT_ENABLE(source) | BIT_ENABLE(dest));
+
+ K3_IPC_MODE(ipcBase, mailBoxNum) =
+ BIT_ENABLE(K3_IPC_MODE_AUTOACK);
+
+ K3_IPC_DATA(ipcBase, mailBoxNum, 0) = source;
+ K3_IPC_DATA(ipcBase, mailBoxNum, 1) = ulLvl;
+
+ K3_IPC_SEND(ipcBase, mailBoxNum) = BIT_ENABLE(source);
+
+#endif
+ } else {
+ BSP_RegWr(BSP_IPC_CPU_RAW_INT(enDstCore), 1 << ulLvl);
+ }
+
+ g_stIpc_debug.u32RecvIntCore = enDstCore;
+ g_stIpc_debug.u32IntSendTimes[ulLvl]++;
+
+ return BSP_OK;
+}
+
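+/*
+ * Hardware handshake lock: spin until the HS_CTRL register for this signal
+ * reads back 0 (lock acquired); BSP_IPC_SpinUnLock() writes 0 to release it.
+ */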
+void BSP_IPC_SpinLock(unsigned int u32SignalNum)
+{
+ unsigned int u32HsCtrl;
+
+ if (u32SignalNum >= INTSRC_NUM) {
+ printk("BSP_IPC_SpinLock Parameter error, line:%d\n",
+ __LINE__);
+ return;
+ }
+ for (;;) {
+ u32HsCtrl = BSP_RegRd(BSP_IPC_HS_CTRL(g_CoreNum, u32SignalNum));
+ if (0 == u32HsCtrl) {
+ break;
+ }
+ }
+}
+
+void BSP_IPC_SpinUnLock(unsigned int u32SignalNum)
+{
+ if (u32SignalNum >= INTSRC_NUM) {
+ printk("BSP_IPC_SpinUnLock Parameter error, line:%d\n",
+ __LINE__);
+ return;
+ }
+ BSP_RegWr(BSP_IPC_HS_CTRL(g_CoreNum, u32SignalNum), 0);
+}
+
+EXPORT_SYMBOL(IPC_IntEnable);
+EXPORT_SYMBOL(IPC_IntDisable);
+EXPORT_SYMBOL(IPC_IntConnect);
+EXPORT_SYMBOL(IPC_IntSend);
diff --git a/drivers/hisi/hifi_mailbox/ipcm/drv_comm.h b/drivers/hisi/hifi_mailbox/ipcm/drv_comm.h
new file mode 100644
index 000000000000..5a7f0a884773
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/ipcm/drv_comm.h
@@ -0,0 +1,188 @@
+
+#ifndef __DRV_COMM_H__
+#define __DRV_COMM_H__
+
+/*************************GLOBAL BEGIN*****************************/
+#ifndef _WIN32_COMPILE
+typedef signed long long BSP_S64;
+#else
+typedef double BSP_S64;
+#endif
+typedef signed int BSP_S32;
+typedef signed short BSP_S16;
+typedef signed char BSP_S8;
+typedef char BSP_CHAR;
+typedef char BSP_CHAR_TL;
+
+#ifndef _WIN32_COMPILE
+typedef unsigned long long BSP_U64;
+#else
+typedef double BSP_U64;
+#endif
+typedef unsigned int BSP_U32;
+typedef unsigned short BSP_U16;
+typedef unsigned char BSP_U8;
+
+typedef int BSP_BOOL;
+typedef void BSP_VOID;
+typedef int BSP_STATUS;
+
+#ifndef _WIN32_COMPILE
+typedef signed long long *BSP_PS64;
+#else
+typedef double *BSP_PS64;
+#endif
+typedef signed int *BSP_PS32;
+typedef signed short *BSP_PS16;
+typedef signed char *BSP_PS8;
+
+#ifndef _WIN32_COMPILE
+typedef unsigned long long *BSP_PU64;
+#else
+typedef double *BSP_PU64;
+#endif
+typedef unsigned int *BSP_PU32;
+typedef unsigned short *BSP_PU16;
+typedef unsigned char *BSP_PU8;
+
+#ifndef UINT8
+typedef unsigned char UINT8;
+#endif
+#ifndef UINT32
+typedef unsigned int UINT32;
+#endif
+typedef int *BSP_PBOOL;
+typedef void *BSP_PVOID;
+typedef int *BSP_PSTATUS;
+
+typedef void VOID;
+typedef BSP_S32 STATUS;
+typedef BSP_S32 UDI_HANDLE;
+
+#ifndef BSP_CONST
+#define BSP_CONST const
+#endif
+
+#ifndef OK
+#define OK (0)
+#endif
+
+#ifndef ERROR
+#define ERROR (-1)
+#endif
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#ifndef BSP_OK
+#define BSP_OK (0)
+#endif
+
+#ifndef BSP_ERROR
+#define BSP_ERROR (-1)
+#endif
+
+#ifndef BSP_TRUE
+#define BSP_TRUE (1)
+#endif
+
+#ifndef BSP_FALSE
+#define BSP_FALSE (0)
+#endif
+
+#ifndef BSP_NULL
+#define BSP_NULL (void *)0
+#endif
+
+typedef BSP_S32(*FUNCPTR_1) (int);
+typedef int (*PWRCTRLFUNCPTRVOID) (void);
+typedef unsigned int (*PWRCTRLFUNCPTR) (unsigned int arg);	/* pointer to function returning unsigned int */
+
+#ifndef INLINE
+#ifdef __KERNEL__
+#define INLINE inline
+#else
+#define INLINE __inline__
+#endif
+#endif
+
+#if defined(BSP_CORE_MODEM) || defined(PRODUCT_CFG_CORE_TYPE_MODEM)
+#ifndef _VOIDFUNCPTR_DEFINED
+typedef BSP_VOID(*VOIDFUNCPTR) ();
+#endif
+#else
+typedef BSP_VOID(*VOIDFUNCPTR) (BSP_U32);
+#define SEM_FULL (1)
+#define SEM_EMPTY (0)
+#define LOCAL static
+#define IVEC_TO_INUM(intVec) ((int)(intVec))
+#endif
+
+#define BSP_ERR_MODULE_OFFSET (0x1000)
+#define BSP_DEF_ERR(mod, errid) \
+ ((((BSP_U32) mod + BSP_ERR_MODULE_OFFSET) << 16) | (errid))
+
+#define BSP_REG(base, reg) (*(volatile BSP_U32 *)((BSP_U32)base + (reg)))
+
+#if defined(BSP_CORE_MODEM) || defined(PRODUCT_CFG_CORE_TYPE_MODEM) || defined(__VXWORKS__)
+#define BSP_REG_READ(base, reg, result) \
+ ((result) = BSP_REG(base, reg))
+
+#define BSP_REG_WRITE(base, reg, data) \
+ (BSP_REG(base, reg) = (data))
+
+#else
+#define BSP_REG_READ(base, reg, result) \
+	((result) = readl(base + reg))
+
+#define BSP_REG_WRITE(base, reg, data) \
+ (writel(data, (base + reg)))
+#endif
+
+#define BSP_REG_SETBITS(base, reg, pos, bits, val) (BSP_REG(base, reg) = (BSP_REG(base, reg) & (~((((u32)1 << (bits)) - 1) << (pos)))) \
+ | ((u32)((val) & (((u32)1 << (bits)) - 1)) << (pos)))
+#define BSP_REG_GETBITS(base, reg, pos, bits) ((BSP_REG(base, reg) >> (pos)) & (((u32)1 << (bits)) - 1))
+
+#define DRV_OK (0)
+#define DRV_ERROR (-1)
+#define DRV_INTERFACE_RSLT_OK (0)
+
+typedef int (*pFUNCPTR) (void);
+
+typedef unsigned long (*pFUNCPTR2) (unsigned long ulPara1,
+ unsigned long ulPara2);
+
+typedef unsigned int tagUDI_DEVICE_ID_UINT32;
+
+typedef struct {
+ BSP_U32 ulblockCount;
+ BSP_U32 ulpageSize;
+ BSP_U32 ulpgCntPerBlk;
+} DLOAD_FLASH_STRU;
+typedef BSP_VOID(*UpLinkRxFunc) (BSP_U8 *buf, BSP_U32 len);
+
+typedef BSP_VOID(*FreePktEncap) (BSP_VOID *PktEncap);
+
+typedef enum tagGMAC_OWNER_E {
+ GMAC_OWNER_VXWORKS = 0,
+ GMAC_OWNER_PS,
+ GMAC_OWNER_MSP,
+ GMAC_OWNER_MAX
+} GMAC_OWNER_E;
+
+typedef enum tagWDT_TIMEOUT_E {
+	WDT_TIMEOUT_1 = 0,	/* 0xFFFF000/WDT_CLK_FREQ, about 3657ms (WDT_CLK_FREQ = ARM_FREQ/6 = 70M) */
+	WDT_TIMEOUT_2,		/* 0x1FFFE000/WDT_CLK_FREQ, about 7314ms */
+	WDT_TIMEOUT_3,		/* 0x3FFFC000/WDT_CLK_FREQ, about 14628ms */
+	WDT_TIMEOUT_4,		/* 0x7FFF8000/WDT_CLK_FREQ, about 29257ms */
+ WDT_TIMEOUT_BUTT
+} WDT_TIMEOUT_E;
+
+/*************************GLOBAL END****************************/
+
+#endif
diff --git a/drivers/hisi/hifi_mailbox/mailbox/Kconfig b/drivers/hisi/hifi_mailbox/mailbox/Kconfig
new file mode 100644
index 000000000000..fdc1fda0b957
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/Kconfig
@@ -0,0 +1,8 @@
+menu "Hifi mailbox driver"
+
+config HIFI_MAILBOX
+	bool "HiFi mailbox driver based on IPC"
+	default n
+	help
+	  Transplanted Balong mailbox driver for the HiFi DSP, layered on the IPC driver.
+endmenu
diff --git a/drivers/hisi/hifi_mailbox/mailbox/Makefile b/drivers/hisi/hifi_mailbox/mailbox/Makefile
new file mode 100644
index 000000000000..df0831d3fb59
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/Makefile
@@ -0,0 +1,17 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/hisi/hifi_mailbox/ipcm
+
+EXTRA_CFLAGS += -DBSP_CORE_APP
+ifdef CONFIG_HIFI_RESET
+EXTRA_CFLAGS += -D_HIFI_WD_DEBUG
+endif
+
+EXTRA_CFLAGS += -Idrivers/hisi/hifi_dsp
+
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_table.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_gut.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_ifc.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_msg.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_debug.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox_port_linux.o
+obj-$(CONFIG_HIFI_MAILBOX) += drv_mailbox.o
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox.c
new file mode 100644
index 000000000000..bd569dd56187
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox.c
@@ -0,0 +1,28 @@
+#include "drv_mailbox_cfg.h"
+
+unsigned int mailbox_send_msg(unsigned int mailcode,
+ void *data, unsigned int length);
+
+unsigned int mailbox_reg_msg_cb(unsigned int mailcode,
+ mb_msg_cb func, void *data);
+
+unsigned int mailbox_read_msg_data(void *mail_handle,
+ char *buff, unsigned int *size);
+
+unsigned int DRV_MAILBOX_SENDMAIL(unsigned int MailCode,
+ void *pData, unsigned int Length)
+{
+ return mailbox_send_msg(MailCode, pData, Length);
+}
+
+unsigned int DRV_MAILBOX_REGISTERRECVFUNC(unsigned int MailCode,
+ mb_msg_cb pFun, void *UserHandle)
+{
+ return mailbox_reg_msg_cb(MailCode, pFun, UserHandle);
+}
+
+unsigned int DRV_MAILBOX_READMAILDATA(void *MailHandle,
+ unsigned char *pData, unsigned int *pSize)
+{
+ return mailbox_read_msg_data(MailHandle, (char *)pData, pSize);
+}
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_cfg.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_cfg.h
new file mode 100644
index 000000000000..a4329c5c7b87
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_cfg.h
@@ -0,0 +1,646 @@
+
+#ifndef __DRV_MAILBOX_CFG_H__
+#define __DRV_MAILBOX_CFG_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#include "../../drivers/hisi/hifi_dsp/hifi_lpp.h"
+#include "mdrv_ipc_enum.h"
+
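+/*
+ * The BEGIN/ITEM/END macros below expand to one enum per source->destination
+ * link, listing the logical channels (MSG, IFC) carried on that link.
+ */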
+#define MAILBOX_CHANNEL_BEGIN(src, dst) \
+ enum MAILBOX_CHANNEL_##src##2##dst##_ENUM \
+ { \
+ MAILBOX_CHANNEL_##src##2##dst##_RSERVED = -1,
+
+#define MAILBOX_CHANNEL_ITEM(src, dst, channel) \
+ MAILBOX_CHANNEL_##src##2##dst##_##channel
+
+#define MAILBOX_CHANNEL_END(src, dst) \
+ MAILBOX_CHANNEL_##src##2##dst##_BUTT \
+ };
+
+ /* CCPU -> HIFI */
+ MAILBOX_CHANNEL_BEGIN(CCPU, HIFI)
+ MAILBOX_CHANNEL_ITEM(CCPU, HIFI, MSG), MAILBOX_CHANNEL_END(CCPU, HIFI)
+
+ /* CCPU -> MCU */
+ MAILBOX_CHANNEL_BEGIN(CCPU, MCU)
+ MAILBOX_CHANNEL_ITEM(CCPU, MCU, MSG),
+ MAILBOX_CHANNEL_ITEM(CCPU, MCU, IFC), MAILBOX_CHANNEL_END(CCPU, MCU)
+
+ /* CCPU -> ACPU */
+ MAILBOX_CHANNEL_BEGIN(CCPU, ACPU)
+ MAILBOX_CHANNEL_ITEM(CCPU, ACPU, MSG),
+ MAILBOX_CHANNEL_ITEM(CCPU, ACPU, IFC),
+ MAILBOX_CHANNEL_END(CCPU, ACPU)
+
+ /* ACPU -> CCPU */
+ MAILBOX_CHANNEL_BEGIN(ACPU, CCPU)
+ MAILBOX_CHANNEL_ITEM(ACPU, CCPU, MSG),
+ MAILBOX_CHANNEL_ITEM(ACPU, CCPU, IFC),
+ MAILBOX_CHANNEL_END(ACPU, CCPU)
+
+ /* ACPU -> MCU */
+ MAILBOX_CHANNEL_BEGIN(ACPU, MCU)
+ MAILBOX_CHANNEL_ITEM(ACPU, MCU, MSG),
+ MAILBOX_CHANNEL_ITEM(ACPU, MCU, IFC), MAILBOX_CHANNEL_END(ACPU, MCU)
+
+ /* ACPU -> HIFI */
+ MAILBOX_CHANNEL_BEGIN(ACPU, HIFI)
+ MAILBOX_CHANNEL_ITEM(ACPU, HIFI, MSG), MAILBOX_CHANNEL_END(ACPU, HIFI)
+
+ /* HIFI -> ACPU */
+ MAILBOX_CHANNEL_BEGIN(HIFI, ACPU)
+ MAILBOX_CHANNEL_ITEM(HIFI, ACPU, MSG), MAILBOX_CHANNEL_END(HIFI, ACPU)
+
+ /* HIFI -> CCPU */
+ MAILBOX_CHANNEL_BEGIN(HIFI, CCPU)
+ MAILBOX_CHANNEL_ITEM(HIFI, CCPU, MSG), MAILBOX_CHANNEL_END(HIFI, CCPU)
+
+ /* HIFI -> BBE16 */
+ MAILBOX_CHANNEL_BEGIN(HIFI, BBE16)
+ MAILBOX_CHANNEL_ITEM(HIFI, BBE16, MSG), MAILBOX_CHANNEL_END(HIFI, BBE16)
+
+ /* MCU -> ACPU */
+ MAILBOX_CHANNEL_BEGIN(MCU, ACPU)
+ MAILBOX_CHANNEL_ITEM(MCU, ACPU, MSG),
+ MAILBOX_CHANNEL_ITEM(MCU, ACPU, IFC), MAILBOX_CHANNEL_END(MCU, ACPU)
+
+ /* MCU -> CCPU */
+ MAILBOX_CHANNEL_BEGIN(MCU, CCPU)
+ MAILBOX_CHANNEL_ITEM(MCU, CCPU, MSG),
+ MAILBOX_CHANNEL_ITEM(MCU, CCPU, IFC), MAILBOX_CHANNEL_END(MCU, CCPU)
+
+ /* BBE16 -> HIFI */
+ MAILBOX_CHANNEL_BEGIN(BBE16, HIFI)
+ MAILBOX_CHANNEL_ITEM(BBE16, HIFI, MSG), MAILBOX_CHANNEL_END(BBE16, HIFI)
+
+ enum MAILBOX_GAP_FOR_SI_PARSE { MAILBOX_GAP_FOR_SI_BUTT };
+
+#define MAILBOX_ID_SRC_CPU_OFFSET (24)
+#define MAILBOX_ID_DST_CPU_OFFSET (16)
+#define MAILBOX_ID_CHANNEL_OFFSET (8)
+
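+/*
+ * A mailcode packs the routing information into one word: source CPU in
+ * bits 31:24, destination CPU in bits 23:16, channel in bits 15:8.
+ */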
+#define MAILBOX_MAILCODE_CHANNEL(src, dst, channel) \
+ (((unsigned int)(src) << MAILBOX_ID_SRC_CPU_OFFSET) \
+ | ((unsigned int)(dst) << MAILBOX_ID_DST_CPU_OFFSET) \
+ | ((unsigned int)(channel) << MAILBOX_ID_CHANNEL_OFFSET))
+
+#define MAILBOX_CPUID(cpu) MAILBOX_CPUID_##cpu
+
+#define MAILBOX_MAILCODE_RESERVED(src, dst, channel) \
+ MAILBOX_MAILCODE_CHANNEL(MAILBOX_CPUID(src), \
+ MAILBOX_CPUID(dst), \
+ MAILBOX_CHANNEL_ITEM(src, dst, channel))
+
+#define MAILBOX_MAILCODE_ITEM_RESERVED(src, dst, channel) \
+ MAILBOX_MAILCODE_##src##2##dst##_##channel##_RESERVED
+
+#define MAILBOX_MAILCODE_ITEM_END(src, dst, channel) \
+ MAILBOX_MAILCODE_##src##2##dst##_##channel##_BUTT
+
+#define MAILBOX_MAILCODE_ITEM_BEGIN(src, dst, channel) \
+ MAILBOX_MAILCODE_ITEM_RESERVED(src, dst, channel) = MAILBOX_MAILCODE_RESERVED(src, dst, channel)
+
+ enum MAILBOX_CPUID_ENUM {
+ MAILBOX_CPUID_RESERVED = -1,
+ MAILBOX_CPUID_ACPU = IPC_CORE_ACORE,
+ MAILBOX_CPUID_CCPU = IPC_CORE_CCORE,
+ MAILBOX_CPUID_MCU = IPC_CORE_ACORE,
+ MAILBOX_CPUID_BBE16 = IPC_CORE_LDSP,
+ MAILBOX_CPUID_HIFI = IPC_CORE_HiFi,
+ MAILBOX_CPUID_BUTT
+ };
+
+ enum MAILBOX_MAILCODE_ENUM {
+ MAILBOX_MAILCODE_ITEM_BEGIN(CCPU, MCU, MSG),
+ MAILBOX_MAILCODE_CCPU_TO_MCU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_CCPU_TO_MCU_VOS_MSG_URGENT,
+ BSP_MAILBOX_CHANNEL_CCPU_TO_MCU_MCA_CH,
+
+ MAILBOX_MAILCODE_ITEM_END(CCPU, MCU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(CCPU, MCU, IFC),
+ MAILBOX_IFC_CCPU_TO_MCU_TEST_CMP,
+ MAILBOX_IFC_CCPU_TO_MCU_TEST,
+ MAILBOX_IFC_CCPU_TO_MCU_MCA,
+ MAILBOX_MAILCODE_ITEM_END(CCPU, MCU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(CCPU, HIFI, MSG),
+ MAILBOX_MAILCODE_CCPU_TO_HIFI_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_CCPU_TO_HIFI_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_ITEM_END(CCPU, HIFI, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(CCPU, ACPU, MSG),
+ MAILBOX_MAILCODE_ITEM_END(CCPU, ACPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(CCPU, ACPU, IFC),
+ MAILBOX_IFC_CCPU_TO_ACPU_TEST_CMP,
+ MAILBOX_IFC_CCPU_TO_ACPU_TEST,
+ MAILBOX_IFC_CCPU_TO_ACPU_PRINT,
+ MAILBOX_IFC_CCPU_TO_ACPU_FOPEN,
+ MAILBOX_IFC_CCPU_TO_ACPU_FCLOSE,
+ MAILBOX_IFC_CCPU_TO_ACPU_FREAD,
+ MAILBOX_IFC_CCPU_TO_ACPU_FWRITE,
+ MAILBOX_IFC_CCPU_TO_ACPU_FSEEK,
+ MAILBOX_IFC_CCPU_TO_ACPU_REMOVE,
+ MAILBOX_IFC_CCPU_TO_ACPU_FTELL,
+ MAILBOX_IFC_CCPU_TO_ACPU_RENAME,
+ MAILBOX_IFC_CCPU_TO_ACPU_ACCESS,
+ MAILBOX_IFC_CCPU_TO_ACPU_MKDIR,
+ MAILBOX_IFC_CCPU_TO_ACPU_RMDIR,
+ MAILBOX_IFC_ACPU_TO_CCPU_PMIC_IRQEVENT_REPO,
+ MAILBOX_MAILCODE_ITEM_END(CCPU, ACPU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(ACPU, MCU, MSG),
+ MAILBOX_MAILCODE_ACPU_TO_MCU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_ACPU_TO_MCU_VOS_MSG_URGENT,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_IFC_CH,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_IFC_RESPONSE_CH,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_SENSOR_CH,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_TP_CH,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_MCA_CH,
+ BSP_MAILBOX_CHANNEL_ACPU_TO_MCU_RST_CH,
+ MAILBOX_MAILCODE_ITEM_END(ACPU, MCU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(ACPU, MCU, IFC),
+ MAILBOX_IFC_ACPU_TO_MCU_TEST_CMP,
+ MAILBOX_IFC_ACPU_TO_MCU_TEST,
+ MAILBOX_IFC_ACPU_TO_MCU_HUTAF_HLT,
+ MAILBOX_IFC_ACPU_TO_MCU_MCA,
+ MAILBOX_IFC_ACPU_TO_MCU_MNTN,
+ MAILBOX_IFC_ACPU_TO_MCU_RUN_CMD,
+ MAILBOX_MAILCODE_ITEM_END(ACPU, MCU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(ACPU, HIFI, MSG),
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_AUDIO,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_MISC,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_VOICE,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_VOICE_RT,
+ MAILBOX_MAILCODE_ACPU_TO_HIFI_CCORE_RESET_ID,
+ MAILBOX_MAILCODE_ITEM_END(ACPU, HIFI, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(ACPU, CCPU, MSG),
+ MAILBOX_IFC_ACPU_TO_CCPU_CSHELL_START,
+ MAILBOX_MAILCODE_ITEM_END(ACPU, CCPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(ACPU, CCPU, IFC),
+ MAILBOX_IFC_ACPU_TO_CCPU_TEST_CMP,
+ MAILBOX_IFC_ACPU_TO_CCPU_TEST,
+ MAILBOX_IFC_ACPU_TO_CCPU_PRINT,
+ MAILBOX_IFC_ACPU_TO_CCPU_FOPEN,
+ MAILBOX_IFC_ACPU_TO_CCPU_FCLOSE,
+ MAILBOX_IFC_ACPU_TO_CCPU_FREAD,
+ MAILBOX_IFC_ACPU_TO_CCPU_FWRITE,
+ MAILBOX_IFC_ACPU_TO_CCPU_FSEEK,
+ MAILBOX_IFC_ACPU_TO_CCPU_REMOVE,
+ MAILBOX_IFC_ACPU_TO_CCPU_FTELL,
+ MAILBOX_IFC_ACPU_TO_CCPU_RENAME,
+ MAILBOX_IFC_ACPU_TO_CCPU_ACCESS,
+ MAILBOX_IFC_ACPU_TO_CCPU_MKDIR,
+ MAILBOX_IFC_ACPU_TO_CCPU_RMDIR,
+ MAILBOX_IFC_ACPU_TO_CCPU_BASE_TEST2,
+ MAILBOX_IFC_ACPU_TO_CCPU_BASE_TEST1,
+ MAILBOX_IFC_ACPU_TO_CCPU_PMIC_IRQEVENT,
+ MAILBOX_IFC_ACPU_TO_CCPU_READ_EFUSE,
+ MAILBOX_IFC_ACPU_TO_CCPU_SYSTEMERROR,
+ MAILBOX_MAILCODE_ITEM_END(ACPU, CCPU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(HIFI, CCPU, MSG),
+ MAILBOX_MAILCODE_HIFI_TO_CCPU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_HIFI_TO_CCPU_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_ITEM_END(HIFI, CCPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(HIFI, ACPU, MSG),
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_AUDIO,
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_MISC,
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_VOICE,
+ MAILBOX_MAILCODE_HIFI_TO_ACPU_CCORE_RESET_ID,
+ MAILBOX_MAILCODE_ITEM_END(HIFI, ACPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(HIFI, BBE16, MSG),
+ MAILBOX_MAILCODE_HIFI_TO_BBE16_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_HIFI_TO_BBE16_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_ITEM_END(HIFI, BBE16, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(MCU, CCPU, MSG),
+ MAILBOX_MAILCODE_MCU_TO_CCPU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_MCU_TO_CCPU_VOS_MSG_URGENT,
+ BSP_MAILBOX_CHANNEL_MCU_TO_CCPU_MCA_CH,
+ MAILBOX_MAILCODE_ITEM_END(MCU, CCPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(MCU, CCPU, IFC),
+ MAILBOX_IFC_MCU_TO_CCPU_TEST_CMP,
+ MAILBOX_IFC_MCU_TO_CCPU_BASE_TEST2,
+ MAILBOX_IFC_MCU_TO_CCPU_BASE_TEST1,
+ MAILBOX_IFC_MCU_TO_CCPU_TEST,
+ MAILBOX_MAILCODE_ITEM_END(MCU, CCPU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(MCU, ACPU, MSG),
+ MAILBOX_MAILCODE_MCU_TO_ACPU_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_MCU_TO_ACPU_VOS_MSG_URGENT,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_IFC_CH,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_IFC_RESPONSE_CH,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_SENSOR_CH,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_TP_CH,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_MCA_CH,
+ BSP_MAILBOX_CHANNEL_MCU_TO_ACPU_MNTN_CH,
+ MAILBOX_IFC_MCU_TO_ACPU_HUTAF_HLT,
+ MAILBOX_MAILCODE_MCU_TO_ACPU_CCORE_RESET_ID,
+ MAILBOX_MAILCODE_ITEM_END(MCU, ACPU, MSG),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(MCU, ACPU, IFC),
+ MAILBOX_IFC_MCU_TO_ACPU_TEST_CMP,
+ MAILBOX_IFC_MCU_TO_ACPU_TEST,
+ MAILBOX_IFC_MCU_TO_ACPU_PRINT,
+ MAILBOX_MAILCODE_ITEM_END(MCU, ACPU, IFC),
+
+ MAILBOX_MAILCODE_ITEM_BEGIN(BBE16, HIFI, MSG),
+ MAILBOX_MAILCODE_BBE16_TO_HIFI_VOS_MSG_NORMAL,
+ MAILBOX_MAILCODE_BBE16_TO_HIFI_VOS_MSG_URGENT,
+ MAILBOX_MAILCODE_ITEM_END(BBE16, HIFI, MSG),
+ };
+
+#define MAILBOX_OK 0
+#define MAILBOX_ERRO 0xF7654321
+#define MAILBOX_FULL 0xF7654322
+#define MAILBOX_NOT_READY 0xF7654323
+#define MAILBOX_TARGET_NOT_READY MAILBOX_NOT_READY
+#define MAILBOX_TIME_OUT 0xF7654324
+
+#define MAILBOX_SEQNUM_START (0)
+
+#define MAILBOX_MEM_BASEADDR (HIFI_AP_MAILBOX_BASE_ADDR)
+
+#define MAILBOX_MEM_LENGTH (HIFI_AP_MAILBOX_TOTAL_SIZE)
+
+ typedef struct mb_head {
+ unsigned int ulProtectWord1;
+ unsigned int ulProtectWord2;
+ unsigned int ulFront;
+ unsigned int ulRear;
+ unsigned int ulFrontslice;
+ unsigned int ulRearslice;
+ unsigned short ausReserve[4];
+ unsigned int ulProtectWord3;
+ unsigned int ulProtectWord4;
+ } MAILBOX_HEAD_STRU;
+
+#define MAILBOX_HEAD_LEN (sizeof(struct mb_head))
+#define MAILBOX_MAX_CHANNEL (30)
+#define MAILBOX_MEM_HEAD_LEN (MAILBOX_MAX_CHANNEL * MAILBOX_HEAD_LEN)
+
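+/*
+ * Shared-memory layout: MAILBOX_MAX_CHANNEL queue headers sit at the base of
+ * the mailbox memory, followed by the per-channel data queues. Only the
+ * ACPU<->HIFI message queues are given space (0x1800 bytes each); every other
+ * queue size is zero.
+ */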
+#define MAILBOX_QUEUE_SIZE(src, dst, channel) \
+ MAILBOX_QUEUE_SIZE_##src##2##dst##_##channel
+ enum MAILBOX_QUEUE_SIZE_ENUM {
+ MAILBOX_QUEUE_SIZE(MCU, ACPU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(ACPU, MCU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(MCU, ACPU, IFC) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(ACPU, MCU, IFC) = 0x00000000,
+
+ MAILBOX_QUEUE_SIZE(MCU, CCPU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(CCPU, MCU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(MCU, CCPU, IFC) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(CCPU, MCU, IFC) = 0x00000000,
+
+ MAILBOX_QUEUE_SIZE(ACPU, CCPU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(CCPU, ACPU, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(ACPU, CCPU, IFC) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(CCPU, ACPU, IFC) = 0x00000000,
+
+ MAILBOX_QUEUE_SIZE(ACPU, HIFI, MSG) = 0x00001800,
+ MAILBOX_QUEUE_SIZE(HIFI, ACPU, MSG) = 0x00001800,
+
+ MAILBOX_QUEUE_SIZE(CCPU, HIFI, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(HIFI, CCPU, MSG) = 0x00000000,
+
+ MAILBOX_QUEUE_SIZE(BBE16, HIFI, MSG) = 0x00000000,
+ MAILBOX_QUEUE_SIZE(HIFI, BBE16, MSG) = 0x00000000
+ };
+
+#define MAILBOX_HEAD_ADDR(src, dst, channel) \
+ MAILBOX_HEAD_ADDR_##src##2##dst##_##channel
+ enum MAILBOX_HEAD_ADDR_ENUM {
+ MAILBOX_HEAD_ADDR(MCU, ACPU, MSG) = MAILBOX_MEM_BASEADDR,
+ MAILBOX_HEAD_ADDR(ACPU, MCU, MSG) =
+ MAILBOX_HEAD_ADDR(MCU, ACPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(ACPU, HIFI, MSG) =
+ MAILBOX_HEAD_ADDR(ACPU, MCU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(HIFI, ACPU, MSG) =
+ MAILBOX_HEAD_ADDR(ACPU, HIFI, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(MCU, CCPU, MSG) =
+ MAILBOX_HEAD_ADDR(HIFI, ACPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(CCPU, MCU, MSG) =
+ MAILBOX_HEAD_ADDR(MCU, CCPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(CCPU, HIFI, MSG) =
+ MAILBOX_HEAD_ADDR(CCPU, MCU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(HIFI, CCPU, MSG) =
+ MAILBOX_HEAD_ADDR(CCPU, HIFI, MSG) + MAILBOX_HEAD_LEN,
+
+ MAILBOX_HEAD_ADDR(ACPU, CCPU, MSG) =
+ MAILBOX_HEAD_ADDR(HIFI, CCPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(CCPU, ACPU, MSG) =
+ MAILBOX_HEAD_ADDR(ACPU, CCPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(CCPU, ACPU, IFC) =
+ MAILBOX_HEAD_ADDR(CCPU, ACPU, MSG) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(ACPU, CCPU, IFC) =
+ MAILBOX_HEAD_ADDR(CCPU, ACPU, IFC) + MAILBOX_HEAD_LEN,
+
+ MAILBOX_HEAD_ADDR(CCPU, MCU, IFC) =
+ MAILBOX_HEAD_ADDR(ACPU, CCPU, IFC) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(MCU, CCPU, IFC) =
+ MAILBOX_HEAD_ADDR(CCPU, MCU, IFC) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(ACPU, MCU, IFC) =
+ MAILBOX_HEAD_ADDR(MCU, CCPU, IFC) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(MCU, ACPU, IFC) =
+ MAILBOX_HEAD_ADDR(ACPU, MCU, IFC) + MAILBOX_HEAD_LEN,
+
+ MAILBOX_HEAD_ADDR(BBE16, HIFI, MSG) =
+ MAILBOX_HEAD_ADDR(MCU, ACPU, IFC) + MAILBOX_HEAD_LEN,
+ MAILBOX_HEAD_ADDR(HIFI, BBE16, MSG) =
+ MAILBOX_HEAD_ADDR(BBE16, HIFI, MSG) + MAILBOX_HEAD_LEN,
+
+ MAILBOX_HEAD_BOTTOM_ADDR =
+ MAILBOX_HEAD_ADDR(HIFI, BBE16, MSG) + MAILBOX_HEAD_LEN
+ };
+
+#define MAILBOX_QUEUE_ADDR(src, dst, channel) \
+ MAILBOX_QUEUE_ADDR_##src##2##dst##_##channel
+#define MAILBOX_QUEUE_BOTTOM_ADDR(src, dst, channel) \
+ (MAILBOX_QUEUE_ADDR(src, dst, channel) + MAILBOX_QUEUE_SIZE(src, dst, channel))
+ enum MAILBOX_QUEUE_ADDR_ENUM {
+ MAILBOX_QUEUE_ADDR(MCU, ACPU, MSG) =
+ MAILBOX_MEM_BASEADDR + MAILBOX_MEM_HEAD_LEN,
+ MAILBOX_QUEUE_ADDR(ACPU, MCU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(MCU, ACPU, MSG),
+ MAILBOX_QUEUE_ADDR(ACPU, HIFI, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(ACPU, MCU, MSG),
+ MAILBOX_QUEUE_ADDR(HIFI, ACPU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(ACPU, HIFI, MSG),
+
+ MAILBOX_QUEUE_ADDR(MCU, CCPU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(HIFI, ACPU, MSG),
+ MAILBOX_QUEUE_ADDR(CCPU, MCU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(MCU, CCPU, MSG),
+ MAILBOX_QUEUE_ADDR(CCPU, HIFI, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(CCPU, MCU, MSG),
+ MAILBOX_QUEUE_ADDR(HIFI, CCPU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(CCPU, HIFI, MSG),
+
+ MAILBOX_QUEUE_ADDR(ACPU, CCPU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(HIFI, CCPU, MSG),
+ MAILBOX_QUEUE_ADDR(CCPU, ACPU, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(ACPU, CCPU, MSG),
+ MAILBOX_QUEUE_ADDR(CCPU, ACPU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(CCPU, ACPU, MSG),
+ MAILBOX_QUEUE_ADDR(ACPU, CCPU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(CCPU, ACPU, IFC),
+
+ MAILBOX_QUEUE_ADDR(CCPU, MCU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(ACPU, CCPU, IFC),
+ MAILBOX_QUEUE_ADDR(MCU, CCPU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(CCPU, MCU, IFC),
+ MAILBOX_QUEUE_ADDR(ACPU, MCU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(MCU, CCPU, IFC),
+ MAILBOX_QUEUE_ADDR(MCU, ACPU, IFC) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(ACPU, MCU, IFC),
+
+ MAILBOX_QUEUE_ADDR(BBE16, HIFI, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(MCU, ACPU, IFC),
+ MAILBOX_QUEUE_ADDR(HIFI, BBE16, MSG) =
+ MAILBOX_QUEUE_BOTTOM_ADDR(BBE16, HIFI, MSG),
+
+ MAILBOX_MEMORY_BOTTOM_ADDR =
+ MAILBOX_QUEUE_BOTTOM_ADDR(HIFI, BBE16, MSG)
+ };
+
+#define MAILBOX_PROTECT1 (0x55AA55AA)
+#define MAILBOX_PROTECT2 (0x5A5A5A5A)
+#define MAILBOX_PROTECT_LEN (sizeof(int))
+#define MAILBOX_MSGHEAD_NUMBER (0xA5A5A5A5)
+
+#define MAILBOX_DATA_BASE_PROTECT_NUM (2)
+#define MAILBOX_DATA_TAIL_PROTECT_NUM (2)
+
+#define MAILBOX_DATA_LEN_PROTECT_NUM (MAILBOX_DATA_BASE_PROTECT_NUM + MAILBOX_DATA_TAIL_PROTECT_NUM)
+
+#define HIFI_MB_ADDR_PROTECT (0x5a5a5a5a)
+
+#define MAILBOX_IPC_INT_NUM(src, dst, channel) \
+ MAILBOX_IPC_INT_##src##2##dst##_##channel
+ enum IPC_MAILBOX_INT_ENUM {
+#if 0
+ MAILBOX_IPC_INT_NUM(CCPU, MCU, MSG) = IPC_MCU_INT_SRC_CCPU_MSG,
+ MAILBOX_IPC_INT_NUM(MCU, CCPU, MSG) = IPC_CCPU_INT_SRC_MCU_MSG,
+
+ MAILBOX_IPC_INT_NUM(CCPU, HIFI, MSG) =
+ IPC_HIFI_INT_SRC_CCPU_MSG,
+ MAILBOX_IPC_INT_NUM(HIFI, CCPU, MSG) =
+ IPC_CCPU_INT_SRC_HIFI_MSG,
+
+ MAILBOX_IPC_INT_NUM(ACPU, MCU, MSG) = IPC_MCU_INT_SRC_ACPU_MSG,
+ MAILBOX_IPC_INT_NUM(MCU, ACPU, MSG) = IPC_ACPU_INT_SRC_MCU_MSG,
+
+ MAILBOX_IPC_INT_NUM(ACPU, HIFI, MSG) =
+ IPC_HIFI_INT_SRC_ACPU_MSG,
+ MAILBOX_IPC_INT_NUM(HIFI, ACPU, MSG) =
+ IPC_ACPU_INT_SRC_HIFI_MSG,
+
+ MAILBOX_IPC_INT_NUM(HIFI, MCU, MSG) = IPC_MCU_INT_SRC_HIFI_MSG,
+ MAILBOX_IPC_INT_NUM(MCU, HIFI, MSG) = IPC_HIFI_INT_SRC_MCU_MSG,
+
+ MAILBOX_IPC_INT_NUM(CCPU, ACPU, MSG) =
+ IPC_ACPU_INT_SRC_CCPU_MSG,
+ MAILBOX_IPC_INT_NUM(ACPU, CCPU, MSG) =
+ IPC_CCPU_INT_SRC_ACPU_MSG,
+
+ MAILBOX_IPC_INT_NUM(CCPU, ACPU, IFC) =
+ IPC_ACPU_INT_SRC_CCPU_IFC,
+ MAILBOX_IPC_INT_NUM(ACPU, CCPU, IFC) =
+ IPC_CCPU_INT_SRC_ACPU_IFC,
+
+ MAILBOX_IPC_INT_NUM(CCPU, MCU, IFC) = IPC_MCU_INT_SRC_CCPU_IFC,
+ MAILBOX_IPC_INT_NUM(MCU, CCPU, IFC) = IPC_CCPU_INT_SRC_MCU_IFC,
+
+ MAILBOX_IPC_INT_NUM(ACPU, MCU, IFC) = IPC_MCU_INT_SRC_ACPU_IFC,
+ MAILBOX_IPC_INT_NUM(MCU, ACPU, IFC) = IPC_ACPU_INT_SRC_MCU_IFC,
+
+ MAILBOX_IPC_INT_NUM(BBE16, HIFI, MSG) =
+ IPC_HIFI_INT_SRC_BBE_MSG,
+ MAILBOX_IPC_INT_NUM(HIFI, BBE16, MSG) =
+ IPC_BBE16_INT_SRC_HIFI_MSG
+#else
+ MAILBOX_IPC_INT_NUM(CCPU, HIFI, MSG) =
+ IPC_HIFI_INT_SRC_CCPU_MSG,
+ MAILBOX_IPC_INT_NUM(HIFI, CCPU, MSG) =
+ IPC_CCPU_INT_SRC_HIFI_MSG,
+
+ MAILBOX_IPC_INT_NUM(ACPU, HIFI, MSG) =
+ IPC_HIFI_INT_SRC_ACPU_MSG,
+ MAILBOX_IPC_INT_NUM(HIFI, ACPU, MSG) =
+ IPC_ACPU_INT_SRC_HIFI_MSG,
+#endif
+ };
+
+#define MAILBOX_MAILSIZE_MAX(src, dst, channel) \
+ MAILBOX_MAILSIZE_MAX_##src##2##dst##_##channel
+ enum MAILBOX_MAILSIZE_MAX_ENUM {
+ MAILBOX_MAILSIZE_MAX(MCU, ACPU, MSG) =
+ MAILBOX_QUEUE_SIZE(MCU, ACPU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(ACPU, MCU, MSG) =
+ MAILBOX_QUEUE_SIZE(ACPU, MCU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(ACPU, HIFI, MSG) =
+ MAILBOX_QUEUE_SIZE(ACPU, HIFI, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(HIFI, ACPU, MSG) =
+ MAILBOX_QUEUE_SIZE(HIFI, ACPU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(MCU, CCPU, MSG) =
+ MAILBOX_QUEUE_SIZE(MCU, CCPU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(CCPU, MCU, MSG) =
+ MAILBOX_QUEUE_SIZE(CCPU, MCU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(CCPU, HIFI, MSG) =
+ MAILBOX_QUEUE_SIZE(CCPU, HIFI, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(HIFI, CCPU, MSG) =
+ MAILBOX_QUEUE_SIZE(HIFI, CCPU, MSG) / 4,
+
+ MAILBOX_MAILSIZE_MAX(CCPU, ACPU, MSG) =
+ MAILBOX_QUEUE_SIZE(CCPU, ACPU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(ACPU, CCPU, MSG) =
+ MAILBOX_QUEUE_SIZE(ACPU, CCPU, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(CCPU, ACPU, IFC) =
+ MAILBOX_QUEUE_SIZE(CCPU, ACPU, IFC) / 4,
+ MAILBOX_MAILSIZE_MAX(ACPU, CCPU, IFC) =
+ MAILBOX_QUEUE_SIZE(ACPU, CCPU, IFC) / 4,
+
+ MAILBOX_MAILSIZE_MAX(CCPU, MCU, IFC) =
+ MAILBOX_QUEUE_SIZE(CCPU, MCU, IFC) / 4,
+ MAILBOX_MAILSIZE_MAX(MCU, CCPU, IFC) =
+ MAILBOX_QUEUE_SIZE(MCU, CCPU, IFC) / 4,
+
+ MAILBOX_MAILSIZE_MAX(ACPU, MCU, IFC) =
+ MAILBOX_QUEUE_SIZE(ACPU, MCU, IFC) / 4,
+ MAILBOX_MAILSIZE_MAX(MCU, ACPU, IFC) =
+ MAILBOX_QUEUE_SIZE(MCU, ACPU, IFC) / 4,
+
+ MAILBOX_MAILSIZE_MAX(BBE16, HIFI, MSG) =
+ MAILBOX_QUEUE_SIZE(BBE16, HIFI, MSG) / 4,
+ MAILBOX_MAILSIZE_MAX(HIFI, BBE16, MSG) =
+ MAILBOX_QUEUE_SIZE(HIFI, BBE16, MSG) / 4,
+ };
+
+ typedef struct mb_mail {
+ unsigned int ulPartition;
+ unsigned int ulWriteSlice;
+ unsigned int ulReadSlice;
+ unsigned int ulSeqNum;
+ unsigned int ulPriority;
+ unsigned int ulMailCode;
+ unsigned int ausReserve[2];
+ unsigned int ulMsgLength;
+ } MAILBOX_MSG_HEADER;
+
+ enum {
+ HI_SYSCTRL_BASE_ADDR_ID = 0,
+ HI_SOCP_REGBASE_ADDR_ID,
+ SOC_IPC_S_BASE_ADDR_ID,
+ SOC_AP_EDMAC_BASE_ADDR_ID,
+ SOC_UART3_BASE_ADDR_ID,
+ SOC_Watchdog1_BASE_ADDR_ID,
+ SOC_AO_SCTRL_SC_SLICER_COUNT0_ADDR_0_ID,
+ SOC_HIFI_Timer00_BASE_ADDR_ID,
+ SOC_HIFI_Timer08_BASE_ADDR_ID,
+ DDR_HIFI_ADDR_ID,
+ SOC_BBP_TDS_BASE_ADDR_ID,
+ };
+
+ typedef struct {
+ unsigned int enID;
+ unsigned int uwAddress;
+ } SOC_HIFI_ADDR_ITEM_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord; /*0x5a5a5a5a */
+ SOC_HIFI_ADDR_ITEM_STRU astSocAddr[64];
+ } SOC_HIFI_ADDR_SHARE_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord;
+ unsigned int uwAddrPhy;
+ unsigned int uwSize;
+ unsigned int uwReserve;
+
+ } MODEM_HIFI_NV_SHARE_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord; /*0x5a5a5a5a */
+ unsigned int uwHifi2AarmMailBoxLen;
+ unsigned int uwAarm2HifiMailBoxLen;
+ unsigned int uwHifiAarmHeadAddr;
+ unsigned int uwHifiAarmBodyAddr;
+ unsigned int uwAarmHifiHeadAddr;
+ unsigned int uwAarmHifiBodyAddr;
+ unsigned int uwReserved[2];
+ } AARM_HIFI_MAILBOX_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord; /*0x5a5a5a5a */
+ unsigned int uwHifi2CarmMailBoxLen;
+ unsigned int uwCarm2HifiMailBoxLen;
+ unsigned int uwHifiCarmHeadAddr;
+ unsigned int uwHifiCarmBodyAddr;
+ unsigned int uwCarmHifiHeadAddr;
+ unsigned int uwCarmHifiBodyAddr;
+ unsigned int uwReserved[2];
+ } CARM_HIFI_MAILBOX_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord;
+ unsigned int uwHifi2CarmIccChannelLen;
+ unsigned int uwHifi2TphyIccChannelLen;
+ unsigned int uwHifi2CarmIccChannelAddr;
+ unsigned int uwCarm2HifiIccChannelAddr;
+ unsigned int uwHifi2TphyIccChannelAddr;
+ unsigned int uwTphy2HifiIccChannelAddr;
+ unsigned int uwReserved[2];
+ } CARM_HIFI_ICC_STRU;
+
+ typedef struct {
+ unsigned int uwProtectWord; /*0x5a5a5a5a */
+ CARM_HIFI_ICC_STRU stCarmHifiMB;
+ AARM_HIFI_MAILBOX_STRU stAarmHifiMB;
+ unsigned int uwNvBaseAddrPhy;
+ unsigned int uwNvBaseAddrVirt;
+ MODEM_HIFI_NV_SHARE_STRU stNVShare;
+ SOC_HIFI_ADDR_SHARE_STRU stSoCShare;
+ unsigned int uwReserved[2];
+ } CARM_HIFI_DYN_ADDR_SHARE_STRU;
+
+ typedef void (*mb_msg_cb) (void *user_handle,
+ void *mail_handle, unsigned int mail_len);
+
+ unsigned int DRV_MAILBOX_SENDMAIL(unsigned int MailCode,
+ void *pData, unsigned int Length);
+
+ unsigned int DRV_MAILBOX_REGISTERRECVFUNC(unsigned int MailCode,
+ mb_msg_cb pFun,
+ void *UserHandle);
+
+ unsigned int DRV_MAILBOX_READMAILDATA(void *MailHandle,
+ unsigned char *pData,
+ unsigned int *pSize);
+
+ void drv_hifi_fill_mb_info(unsigned int addr);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* end of drv_mailbox_cfg.h */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.c
new file mode 100644
index 000000000000..2d8921807b78
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.c
@@ -0,0 +1,443 @@
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_debug.h"
+#include "drv_mailbox_gut.h"
+
+extern int logMsg(char *fmt, ...);
+
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "dbg"
+#if (MAILBOX_LOG_LEVEL != MAILBOX_LOG_NONE)
+MAILBOX_EXTERN int mailbox_log_erro(unsigned int err_no,
+ unsigned long param1,
+ unsigned long param2,
+ unsigned int line_no, char *file_name)
+{
+ struct mb *mb = &g_mailbox_handle;
+ struct mb_log *record = &mb->log_array[0];
+ unsigned int log_out = MAILBOX_FALSE;
+ char *erro_str = MAILBOX_NULL;
+
+ record[mb->log_prob].erro_num = err_no;
+ record[mb->log_prob].line = line_no;
+ record[mb->log_prob].param1 = param1;
+ record[mb->log_prob].param2 = param2;
+ record[mb->log_prob].file = (const char *)file_name;
+ mb->log_prob = ((MAILBOX_ERRO_ARRAY_NUM - 1) == mb->log_prob) ? (0) :
+ (mb->log_prob + 1);
+
+#if (MAILBOX_LOG_LEVEL >= MAILBOX_LOG_CRITICAL)
+ if ((err_no > MAILBOX_CRIT_RET_START)
+ && (err_no < MAILBOX_CRIT_RET_END)) {
+ erro_str = "mb Critical!";
+ log_out = MAILBOX_TRUE;
+ mailbox_assert(err_no);
+ }
+#endif
+
+#if (MAILBOX_LOG_LEVEL >= MAILBOX_LOG_ERROR)
+ if ((err_no > MAILBOX_ERR_RET_START) && (err_no < MAILBOX_ERR_RET_END)) {
+ erro_str = "mb error!";
+ log_out = MAILBOX_TRUE;
+ }
+#endif
+
+#if (MAILBOX_LOG_LEVEL >= MAILBOX_LOG_WARNING)
+ if ((err_no > MAILBOX_WARNING_RET_START)
+ && (err_no < MAILBOX_WARNING_RET_END)) {
+ erro_str = "mb warning!";
+ log_out = MAILBOX_TRUE;
+ }
+#endif
+
+#if (MAILBOX_LOG_LEVEL >= MAILBOX_LOG_INFO)
+ if ((err_no > MAILBOX_INFO_RET_START)
+ && (err_no < MAILBOX_INFO_RET_END)) {
+ erro_str = "mb info!";
+ log_out = MAILBOX_TRUE;
+ }
+#endif
+
+ if (MAILBOX_FULL == err_no) {
+ mailbox_out(("mb(%lu) full !" RT, param1));
+ log_out = MAILBOX_TRUE;
+ } else if (MAILBOX_NOT_READY == err_no) {
+ mailbox_out(("remote mb(%lu) not ready!" RT, param1));
+ log_out = MAILBOX_TRUE;
+ }
+
+ if (MAILBOX_TRUE == log_out) {
+ mailbox_out(("%s:0x%08x, param1:%lu, param2:%lu, (line:%d),(file:%s)" RT, erro_str, err_no, param1, param2, (unsigned int)line_no, file_name));
+ }
+
+ return (int)err_no;
+}
+#endif
+
+unsigned int g_mb_trans_time_limit = MAILBOX_MAIL_TRANS_TIME_LIMIT;
+unsigned int g_mb_deal_time_limit = MAILBOX_MAIL_DEAL_TIME_LIMIT;
+unsigned int g_mb_sche_limit = MAILBOX_MAIL_SCHE_TIME_LIMIT;
+
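+/*
+ * Record one timing sample for a slice counter: add the elapsed slices to the
+ * running total (flagging overflow), remember the per-mailcode maximum, and
+ * log a warning when the sample exceeds the given threshold.
+ */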
+void mailbox_statistic_slice(struct mb_slice *slice,
+			     unsigned int mailcode,
+			     unsigned int threshold, unsigned int error_code)
+{
+ unsigned int slice_diff;
+ unsigned int slice_end = (unsigned int)mailbox_get_timestamp();
+ unsigned int slice_start = slice->start;
+
+ slice_diff = mailbox_get_slice_diff(slice_start, slice_end);
+ if (slice_diff < 0) {
+ return;
+ }
+
+ slice_end = slice->total;
+ slice->total += slice_diff;
+ if (slice_end > slice->total) {
+ slice->overflow = MAILBOX_TRUE;
+ }
+
+ if (slice_diff > slice->max) {
+ slice->max = slice_diff;
+ slice->code = mailcode;
+ }
+
+	if (slice_diff >= threshold) {
+		mailbox_logerro_p2(error_code, slice_diff, mailcode);
+	}
+}
+
+void mailbox_record_sche_send(void *priv)
+{
+ struct mb_buff *mbuf = (struct mb_buff *)priv;
+ mbuf->mntn.sche.start = (unsigned long)mailbox_get_timestamp();
+}
+
+void mailbox_record_sche_recv(void *priv)
+{
+ struct mb_buff *mbuf = (struct mb_buff *)priv;
+
+ mailbox_statistic_slice(&mbuf->mntn.sche, mbuf->channel_id,
+ g_mb_sche_limit, MAILBOX_WARNING_SCHE_TIME_OUT);
+}
+
+void mailbox_record_send(struct mb_mntn *mntn,
+ unsigned int mailcode,
+ unsigned int time_stamp, unsigned long mail_addr)
+{
+ struct mb_queue *m_queue = &mntn->mbuff->mail_queue;
+ unsigned int size_left =
+ (unsigned int)mailbox_queue_left(m_queue->rear, m_queue->front,
+ m_queue->length);
+
+ if (size_left < mntn->peak_traffic_left) {
+ mntn->peak_traffic_left = size_left;
+ }
+ mntn->track_array[mntn->track_prob].send_slice = time_stamp;
+ mntn->track_array[mntn->track_prob].mail_addr = mail_addr;
+ mntn->track_array[mntn->track_prob].use_id =
+ mailbox_get_use_id(mailcode);
+ mntn->track_prob =
+ ((MAILBOX_RECORD_USEID_NUM - 1) ==
+ mntn->track_prob) ? (0) : (mntn->track_prob + 1);
+}
+
+void mailbox_record_transport(struct mb_mntn *mntn,
+ unsigned int mailcode,
+ unsigned int write_slice,
+ unsigned int read_slice, unsigned long mail_addr)
+{
+ struct mb_queue *m_queue = &mntn->mbuff->mail_queue;
+ unsigned int size_left =
+ (unsigned int)mailbox_queue_left(m_queue->rear, m_queue->front,
+ m_queue->length);
+
+ if (size_left < mntn->peak_traffic_left) {
+ mntn->peak_traffic_left = size_left;
+ }
+
+ if (size_left < (m_queue->length >> 3)) {
+ mailbox_logerro_p2(MAILBOX_ERR_GUT_MAILBOX_RECEIVE_FULL,
+ size_left, mailcode);
+ }
+
+ mntn->track_array[mntn->track_prob].use_id =
+ (unsigned short)mailbox_get_use_id(mailcode);
+ mntn->track_array[mntn->track_prob].send_slice = write_slice;
+ mntn->track_array[mntn->track_prob].recv_slice = read_slice;
+ mntn->track_array[mntn->track_prob].mail_addr = mail_addr;
+ mntn->track_prob =
+ ((MAILBOX_RECORD_USEID_NUM - 1) ==
+ mntn->track_prob) ? (0) : (mntn->track_prob + 1);
+
+ mntn->trans.start = write_slice;
+ mailbox_statistic_slice(&mntn->trans, mailcode,
+ g_mb_trans_time_limit,
+ MAILBOX_WARNING_TRANSPORT_TIME_OUT);
+
+}
+
+void mailbox_record_receive(struct mb_mntn *mntn,
+ unsigned int mailcode, unsigned int slice_start)
+{
+ mntn->deal.start = slice_start;
+ mailbox_statistic_slice(&mntn->deal, mailcode,
+ g_mb_deal_time_limit,
+ MAILBOX_WARNING_RECEIVE_TIME_OUT);
+}
+
+void mailbox_clear_mntn(struct mb_mntn *mntn, int clear)
+{
+ struct mb_buff *mbuff;
+ if (clear) {
+ mbuff = mntn->mbuff;
+ mailbox_memset(mntn, 0x00, sizeof(struct mb_mntn));
+ mntn->mbuff = mbuff;
+ mntn->peak_traffic_left = MAILBOX_QUEUE_LEFT_INVALID;
+ }
+}
+
+MAILBOX_LOCAL void mailbox_show_general(struct mb_cfg *cfg)
+{
+ struct mb_head *pBoxHead = (struct mb_head *)(cfg->head_addr);
+
+ mailbox_out(("Max Id, HeadAddr, DataAddr, DataSize, IntSrcId" RT));
+ mailbox_out(("0x%08x, 0x%08x, 0x%08x, 0x%08x, %04d" RT,
+ (unsigned int)cfg->butt_id, (unsigned int)cfg->head_addr,
+ (unsigned int)cfg->data_addr, (unsigned int)cfg->data_size,
+ (unsigned int)cfg->int_src));
+ mailbox_out(("Head information:" RT));
+
+ mailbox_out(("Head Front: 0x%x (0x%08x)" RT,
+ (unsigned int)pBoxHead->ulFront,
+ (unsigned int)(cfg->data_addr +
+ (pBoxHead->ulFront *
+ sizeof(unsigned long)))));
+ mailbox_out(("Head Rear: 0x%x (0x%08x)" RT,
+ (unsigned int)pBoxHead->ulRear,
+ (unsigned int)(cfg->data_addr +
+ (pBoxHead->ulRear *
+ sizeof(unsigned long)))));
+ mailbox_out(("Head Frontslice: 0x%x" RT,
+ (unsigned int)pBoxHead->ulFrontslice));
+ mailbox_out(("Head Rearslice: 0x%x" RT,
+ (unsigned int)pBoxHead->ulRearslice));
+ mailbox_out((":-------------------------------------------------------------:" RT));
+
+}
+
+MAILBOX_LOCAL void mailbox_show_receive(struct mb_buff *mbuf)
+{
+ struct mb_mntn *mntn = &(mbuf->mntn);
+ struct mb_cb *callback = mbuf->read_cb;
+ unsigned int max_use = mailbox_get_use_id(mbuf->config->butt_id);
+ unsigned int i = 0;
+
+ mailbox_out((":---------------------------------------------:" RT));
+ mailbox_out(("Receive info:" RT));
+
+ mailbox_out(("Mail Read Call Back show:" RT));
+ mailbox_out(("Use Id, Call Back, User Handle" RT));
+ while (i < max_use) {
+ mailbox_out(("%d, %pK, %pK" RT, (unsigned int)i,
+ callback->func, callback->handle));
+ callback++;
+ i++;
+ }
+
+ if (MAILBOX_TRUE != mntn->sche.overflow) {
+ mailbox_out(("Schedule Avg. slice:%4d, total:%d" RT,
+ (unsigned int)((mbuf->seq_num + 1)
+ ? (mntn->sche.total) /
+ (mbuf->seq_num + 1) : 0),
+ (unsigned int)(mbuf->seq_num)));
+ } else {
+ mailbox_out(("Schedule Avg. data overflow " RT));
+ }
+ mailbox_out(("Schedule Max. slice:%4d, Use ID:0x%08x" RT,
+ (unsigned int)(mntn->sche.max),
+ (unsigned int)(mntn->sche.code)));
+
+	if (MAILBOX_TRUE != mntn->trans.overflow) {
+		mailbox_out(("Transfers Avg. slice:%4d, total:%d" RT,
+			     (int)((mbuf->seq_num + 1) ?
+				   ((mntn->trans.total) / (mbuf->seq_num + 1)) : 0),
+			     (unsigned int)(mbuf->seq_num)));
+	} else {
+		mailbox_out(("Transfers Avg. data overflow" RT));
+	}
+ mailbox_out(("Transfers Max. slice:%4d, Use ID:0x%08x" RT,
+ (unsigned int)(mntn->trans.max), (int)(mntn->trans.code)));
+
+ if (MAILBOX_TRUE != mntn->deal.overflow) {
+ mailbox_out(("Call Back Avg. slice:%4d, total:%d" RT,
+ (unsigned int)((mbuf->seq_num + 1)
+ ? (mntn->deal.total) /
+ (mbuf->seq_num + 1) : 0),
+ (unsigned int)(mbuf->seq_num)));
+ } else {
+ mailbox_out(("Call Back Avg. data overflow" RT));
+ }
+ mailbox_out(("Call Back Max. slice:%4d, Use ID:0x%08x" RT,
+ (unsigned int)(mntn->deal.max),
+ (unsigned int)(mntn->deal.code)));
+
+ mailbox_out((":---------------------------------------------:" RT));
+}
+
+MAILBOX_LOCAL void mailbox_show_detail(struct mb *mb,
+ struct mb_buff *mbuf, int clear)
+{
+ struct mb_mntn *mntn = MAILBOX_NULL;
+ unsigned int channel = mbuf->channel_id;
+ struct mb_queue *queue = &mbuf->mail_queue;
+ struct mb_mail *mail;
+ unsigned int i;
+
+ mailbox_out(("mail box show channel(0x%08x) information:" RT,
+ (unsigned int)channel));
+
+ mailbox_show_general(mbuf->config);
+ mailbox_out((":---------------------------------------------:" RT));
+ mntn = &(mbuf->mntn);
+ i = mntn->track_prob;
+ mailbox_out(("Latest transmit mails track:(%d total)" RT,
+ (unsigned int)mbuf->seq_num));
+ mailbox_out(("id ,address ,send slice ,recv slice ,diff slice"
+ RT));
+ do {
+ i = ((0 == i) ? (MAILBOX_RECORD_USEID_NUM - 1) : (i - 1));
+ mail = (struct mb_mail *)(mntn->track_array[i].mail_addr);
+
+ if (mail && (0 == mntn->track_array[i].recv_slice)) {
+ mntn->track_array[i].recv_slice = mail->ulReadSlice;
+ }
+		mailbox_out(("%02d ,%pK ,0x%-8x ,0x%-8x ,0x%-8x(%d)" RT,
+			     mntn->track_array[i].use_id,
+			     mail,
+			     (unsigned int)mntn->track_array[i].send_slice,
+			     (unsigned int)mntn->track_array[i].recv_slice,
+			     (unsigned int)mailbox_get_slice_diff(
+					mntn->track_array[i].send_slice,
+					mntn->track_array[i].recv_slice),
+			     (unsigned int)mailbox_get_slice_diff(
+					mntn->track_array[i].send_slice,
+					mntn->track_array[i].recv_slice)));
+ } while (i != (mntn->track_prob));
+
+ if (mb->local_id == mailbox_get_dst_id(channel)) {
+ mailbox_out(("Receive Channel" RT));
+ }
+
+ if (mb->local_id == mailbox_get_src_id(channel)) {
+ mailbox_out(("Send Channel: sem id(%pK)" RT, mbuf->mutex));
+ } else if (mb->local_id == mailbox_get_dst_id(channel)) {
+ mailbox_out(("Receive Channel: sem id(%pK)" RT, mbuf->mutex));
+ mailbox_show_receive(mbuf);
+ }
+
+ if (MAILBOX_QUEUE_LEFT_INVALID == mntn->peak_traffic_left) {
+ mntn->peak_traffic_left = queue->length;
+ }
+ mailbox_out(("Peak Traffic: %d%%, Peak: 0x%x, Total: 0x%x" RT,
+ (int)100 * (queue->length -
+ mntn->peak_traffic_left) / queue->length,
+ (unsigned int)(queue->length - mntn->peak_traffic_left),
+ (unsigned int)queue->length));
+
+ mailbox_out((":------------------------------------------------:" RT));
+
+ mailbox_clear_mntn(mntn, clear);
+}
+
+MAILBOX_EXTERN int mailbox_show(unsigned int channel, unsigned int show_flag)
+{
+ struct mb_cfg *config = &g_mailbox_global_cfg_tbl[0];
+ struct mb_buff *mbuf = MAILBOX_NULL;
+ struct mb_link *send_tbl = MAILBOX_NULL;
+ struct mb_link *recv_tbl = MAILBOX_NULL;
+ struct mb *mb = MAILBOX_NULL;
+ struct mb_log *record = MAILBOX_NULL;
+ unsigned int i;
+ unsigned int j;
+ unsigned int clear = MAILBOX_FALSE;
+
+ mb = mailbox_get_mb();
+ if (MAILBOX_NULL == mb) {
+ return (int)MAILBOX_ERRO;
+ }
+
+ if (MAILBOX_SHOW_CLEAR & show_flag) {
+ clear = MAILBOX_TRUE;
+ }
+
+ if (MAILBOX_SHOW_ALL & show_flag) {
+ /*Show all channel's general information. */
+ send_tbl = mb->send_tbl;
+ for (i = 0; i < MAILBOX_CPUID_BUTT; i++) {
+ if (MAILBOX_NULL != send_tbl[i].channel_buff) {
+ mbuf = send_tbl[i].channel_buff;
+ for (j = 0; j < send_tbl[i].carrier_butt; j++) {
+ mailbox_show_detail(mb, mbuf,
+ (int)clear);
+ mbuf++;
+ }
+ }
+ }
+
+ recv_tbl = mb->recv_tbl;
+ for (i = 0; i < MAILBOX_CPUID_BUTT; i++) {
+ if (MAILBOX_NULL != recv_tbl[i].channel_buff) {
+ mbuf = recv_tbl[i].channel_buff;
+ for (j = 0; j < recv_tbl[i].carrier_butt; j++) {
+ mailbox_show_detail(mb, mbuf,
+ (int)clear);
+ mbuf++;
+ }
+ }
+ }
+ } else {
+ mbuf = mailbox_get_channel_handle(mb, channel);
+
+ if (MAILBOX_NULL != mbuf) {
+ mailbox_show_detail(mb, mbuf, (int)clear);
+ } else {
+ mailbox_out(("mail box show global channel config:"
+ RT));
+ while (MAILBOX_MAILCODE_INVALID != config->butt_id) {
+ mailbox_show_general(config);
+ config++;
+ }
+
+ i = mb->log_prob;
+ record = &mb->log_array[0];
+ mailbox_out(("Latest error log track:" RT));
+ mailbox_out(("error num, line num, file name" RT));
+			do {
+				i = ((0 == i) ?
+				     (MAILBOX_ERRO_ARRAY_NUM - 1) : (i - 1));
+ mailbox_out(("0x%-8x, %-8d, %-8s" RT,
+ (unsigned int)record[i].erro_num,
+ (unsigned int)(record[i].line),
+ (record[i].file)));
+
+ } while (i != (mb->log_prob));
+ mailbox_out(("track end." RT));
+ }
+ }
+
+ mailbox_out((":================================================:" RT));
+ return MAILBOX_OK;
+}
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.h
new file mode 100644
index 000000000000..a0efab3ac7f2
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_debug.h
@@ -0,0 +1,213 @@
+#ifndef _DRV_MAILBOX_DEBUG_H_
+#define _DRV_MAILBOX_DEBUG_H_
+
+#include "drv_mailbox_platform.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define MAILBOX_MAIL_TRANS_TIME_LIMIT (3000)
+#define MAILBOX_MAIL_DEAL_TIME_LIMIT (2000)
+#define MAILBOX_MAIL_SCHE_TIME_LIMIT (2000)
+
+#define MAILBOX_ERRO_ARRAY_NUM (5)
+
+#define MAILBOX_LOG_NONE (-1)
+#define MAILBOX_LOG_CRITICAL (0)
+#define MAILBOX_LOG_ERROR (1)
+#define MAILBOX_LOG_WARNING (2)
+#define MAILBOX_LOG_INFO (3)
+
+#define MAILBOX_SHOW_ALL (0x001)
+#define MAILBOX_SHOW_CLEAR (0x010)
+
+#define MAILBOX_QUEUE_LEFT_INVALID (0xffffffff)
+
+#define MAILBOX_MAX_SLICE (0xffffffff)
+#define mailbox_get_slice_diff(s, e) (((e) >= (s))?((e) - (s)) : ((MAILBOX_MAX_SLICE - (s)) + (e)))
+
+#if (MAILBOX_LOG_LEVEL == MAILBOX_LOG_NONE)
+#define mailbox_logerro_p0(ErrorId) (unsigned int)(ErrorId)
+
+#define mailbox_logerro_p1(ErrorId, Param) (unsigned int)(ErrorId)
+
+#define mailbox_logerro_p2(ErrorId, Param1, Param2) (unsigned int)(ErrorId)
+
+#else
+#define mailbox_logerro_p0(ErrorId) mailbox_log_erro((unsigned int)ErrorId, (unsigned int)0, (unsigned int)0, \
+ (unsigned int)_MAILBOX_LINE_, (char *)_MAILBOX_FILE_)
+
+#define mailbox_logerro_p1(ErrorId, Param) mailbox_log_erro((unsigned int)ErrorId, (unsigned long)Param, (unsigned long)0, \
+ (unsigned int)_MAILBOX_LINE_, (char *)_MAILBOX_FILE_)
+
+#define mailbox_logerro_p2(ErrorId, Param1, Param2) mailbox_log_erro((unsigned int)ErrorId, (unsigned long)Param1, Param2, \
+ (unsigned int)_MAILBOX_LINE_, (char *)_MAILBOX_FILE_)
+#endif
+
+#define MAILBOX_CRIT_RET_START 0x80000001
+#define MAILBOX_CRIT_GUT_INVALID_USER_MAIL_HANDLE 0x80000002
+#define MAILBOX_CRIT_GUT_INIT_CHANNEL_POOL_TOO_SMALL 0x80000003
+#define MAILBOX_CRIT_GUT_INIT_USER_POOL_TOO_SMALL 0x80000004
+#define MAILBOX_CRIT_GUT_MUTEX_LOCK_FAILED 0x80000005
+#define MAILBOX_CRIT_GUT_MUTEX_UNLOCK_FAILED 0x80000006
+#define MAILBOX_CRIT_GUT_MSG_CHECK_FAIL 0x80000007
+#define MAILBOX_CRIT_GUT_RECEIVE_MAIL 0x80000008
+#define MAILBOX_CRIT_GUT_READ_MAIL 0x80000009
+#define MAILBOX_CRIT_GUT_MEMORY_CONFIG 0x8000000a
+#define MAILBOX_CRIT_PORT_CONFIG 0x80000010
+#define MAILBOX_CRIT_RET_END 0x800000ff
+
+#define MAILBOX_ERR_RET_START 0x80000100
+#define MAILBOX_ERR_GUT_INVALID_CPU_LINK 0x80000101
+#define MAILBOX_ERR_GUT_INVALID_SRC_CPU 0x80000102
+#define MAILBOX_ERR_GUT_INVALID_TARGET_CPU 0x80000103
+#define MAILBOX_ERR_GUT_INVALID_CHANNEL_ID 0x80000104
+#define MAILBOX_ERR_GUT_INVALID_CARRIER_ID 0x80000105
+#define MAILBOX_ERR_GUT_REQUEST_CHANNEL 0x80000106
+#define MAILBOX_ERR_GUT_RELEASE_CHANNEL 0x80000107
+#define MAILBOX_ERR_GUT_TIMESTAMP_CHECK_FAIL 0x80000108
+#define MAILBOX_ERR_GUT_WRITE_EXCEED_MAX_SIZE 0x80000109
+#define MAILBOX_ERR_GUT_ALREADY_INIT 0x8000010a
+#define MAILBOX_ERR_GUT_SEND_MAIL_IN_INT_CONTEXT 0x8000010b
+#define MAILBOX_ERR_GUT_USER_BUFFER_SIZE_TOO_SMALL 0x8000010c
+#define MAILBOX_ERR_GUT_INPUT_PARAMETER 0x8000010d
+#define MAILBOX_ERR_GUT_NOT_INIT 0x8000010e
+#define MAILBOX_ERR_GUT_MAILBOX_SEQNUM_CHECK_FAIL 0x8000010f
+#define MAILBOX_ERR_GUT_MAILBOX_RECEIVE_FULL 0x80000110
+#define MAILBOX_ERR_GUT_INIT_PLATFORM 0x80000112
+#define MAILBOX_ERR_GUT_MAILBOX_NULL_PARAM 0x80000113
+#define MAILBOX_ERR_GUT_CREATE_BOX 0x80000114
+#define MAILBOX_ERR_GUT_MUTEX_CREATE_FAILED 0x80000115
+#define MAILBOX_ERR_GUT_READ_CALLBACK_NOT_FIND 0x80000116
+#define MAILBOX_ERR_GUT_INVALID_USER_ID 0x80000117
+#define MAILBOX_ERR_GUT_INIT_CHANNEL_POOL_TOO_LARGE 0x80000118
+#define MAILBOX_ERR_GUT_INIT_USER_POOL_TOO_LARGE 0x80000119
+#define MAILBOX_ERR_GUT_CALCULATE_SPACE 0x8000011a
+#define MAILBOX_ERR_GUT_INIT_CORESHARE_MEM 0x8000011b
+
+#define MAILBOX_ERR_VXWORKS_TASK_CREATE 0x80000140
+#define MAILBOX_ERR_VXWORKS_CALLBACK_NOT_FIND 0x80000141
+#define MAILBOX_ERR_VXWORKS_CALLBACK_ERRO 0x80000142
+#define MAILBOX_ERR_VXWORKS_MAIL_TASK_NOT_FIND 0x80000143
+#define MAILBOX_ERR_VXWORKS_MAIL_INT_NOT_FIND 0x80000144
+#define MAILBOX_ERR_VXWORKS_CHANNEL_NOT_FIND 0x80000145
+#define MAILBOX_ERR_VXWORKS_ALLOC_MEMORY 0x80000146
+
+#define MAILBOX_ERR_MCU_CHANNEL_NOT_FIND 0x80000160
+#define MAILBOX_ERR_MCU_ZOS_MSG_ALLOC_FAIL 0x80000161
+#define MAILBOX_ERR_MCU_ZOS_MSG_SEND_FAIL 0x80000162
+#define MAILBOX_ERR_MCU_ZOS_PID_NOT_FIND 0x80000163
+#define MAILBOX_ERR_MCU_ZOS_CBFUNC_NULL 0x80000164
+
+#define MAILBOX_ERR_LINUX_TASK_CREATE 0x80000180
+#define MAILBOX_ERR_LINUX_CALLBACK_NOT_FIND 0x80000181
+#define MAILBOX_ERR_LINUX_CALLBACK_ERRO 0x80000182
+#define MAILBOX_ERR_LINUX_MAIL_TASK_NOT_FIND 0x80000183
+#define MAILBOX_ERR_LINUX_MAIL_INT_NOT_FIND 0x80000184
+#define MAILBOX_ERR_LINUX_CHANNEL_NOT_FIND 0x80000185
+#define MAILBOX_ERR_LINUX_ALLOC_MEMORY 0x80000186
+
+#define MAILBOX_ERR_RET_END 0x800001ff
+
+#define MAILBOX_WARNING_RET_START 0x80000200
+#define MAILBOX_WARNING_USER_CALLBACK_ALREADY_EXIST 0x80000201
+#define MAILBOX_WARNING_TRANSPORT_TIME_OUT 0x80000202
+#define MAILBOX_WARNING_RECEIVE_TIME_OUT 0x80000203
+#define MAILBOX_WARNING_SCHE_TIME_OUT 0x80000204
+#define MAILBOX_WARNING_RET_END 0x800002ff
+
+#define MAILBOX_INFO_RET_START 0x80000300
+#define MAILBOX_INFO_RECEIVE_FIRST_MAIL 0x80000301
+#define MAILBOX_INFO_SEND_FIRST_MAIL 0x80000302
+#define MAILBOX_INFO_RET_END 0x800003ff
+
+#define MAILBOX_BOARDST_USER_PROTECT1 (0x18273645)
+
+#define MAILBOX_MCU_TEST_BUFF_SIZE 256
+
+ struct mb_st_msg {
+ unsigned int protect;
+ unsigned int length;
+ unsigned int back_code;
+ unsigned int test_id;
+ };
+
+ enum MAILBOX_BOARDST_TEST_ID_E {
+ MAILBOX_BOARDST_ID_LOOP_SEND,
+ MAILBOX_BOARDST_ID_LOOP_BACK,
+ MAILBOX_BOARDST_ID_LOOP_FINISH
+ };
+
+ struct mb_log {
+ unsigned int erro_num;
+ unsigned long param1;
+ unsigned long param2;
+ unsigned int line;
+ const char *file;
+ };
+
+ typedef struct {
+ unsigned int send_slice;
+ unsigned int recv_slice;
+ unsigned long mail_addr;
+ unsigned short use_id;
+ unsigned short reserved;
+ } MAILBOX_TRACK_STRU;
+
+ struct mb_slice {
+ unsigned int total;
+ unsigned int start;
+ unsigned int max;
+ unsigned int code;
+ unsigned int overflow;
+ };
+
+ struct mb_mntn {
+ unsigned int peak_traffic_left;
+
+ struct mb_slice trans;
+ struct mb_slice deal;
+ struct mb_slice sche;
+
+ unsigned int track_prob;
+ MAILBOX_TRACK_STRU track_array[MAILBOX_RECORD_USEID_NUM];
+ struct mb_buff *mbuff;
+ };
+
+ extern MAILBOX_EXTERN int mailbox_log_erro(unsigned int err_no,
+ unsigned long param1,
+ unsigned long param2,
+ unsigned int line_no,
+ char *file_name);
+
+ MAILBOX_EXTERN void mailbox_record_send(struct mb_mntn *mntn,
+ unsigned int mailcode,
+ unsigned int time_stamp,
+ unsigned long mail_addr);
+
+ MAILBOX_EXTERN void mailbox_record_transport(struct mb_mntn *mntn,
+ unsigned int mailcode,
+ unsigned int write_slice,
+ unsigned int read_slice,
+ unsigned long mail_addr);
+
+ MAILBOX_EXTERN void mailbox_record_receive(struct mb_mntn *mntn,
+ unsigned int mailcode,
+ unsigned int slice_start);
+
+ MAILBOX_EXTERN int mailbox_show(unsigned int channel,
+ unsigned int show_all);
+
+ MAILBOX_EXTERN void mailbox_record_sche_send(void *priv);
+
+ MAILBOX_EXTERN void mailbox_record_sche_recv(void *priv);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /*_DRV_MAILBOX_DEBUG_H_*/
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.c
new file mode 100644
index 000000000000..130c2074df2e
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.c
@@ -0,0 +1,981 @@
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_platform.h"
+#include "drv_mailbox_debug.h"
+#include "drv_mailbox_gut.h"
+#include <asm/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/pgtable.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+extern int logMsg(char *fmt, ...);
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "gut"
+
+#define SLICE_REG 0xFFF0A534
+#define mem_remap_nocache(phys_addr, size) mem_remap_type(phys_addr, size, pgprot_writecombine(PAGE_KERNEL))
+MAILBOX_EXTERN struct mb g_mailbox_handle = { MAILBOX_MAILCODE_INVALID };
+
+void __iomem *g_shareMemBase;
+void *g_slice_reg;
+
+MAILBOX_LOCAL int mailbox_init_mem(struct mb_cfg *config);
+
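+/*
+ * mailbox_queue_write - copy @size bytes from @data into the circular
+ * mail queue at queue->front, wrapping back to queue->base when the
+ * copy would run past the end of the buffer.  Advances front and
+ * returns the number of bytes written.
+ */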
+MAILBOX_EXTERN int mailbox_queue_write(struct mb_queue *queue,
+ char *data, unsigned int size)
+{
+ unsigned int SizeToBottom;
+
+ SizeToBottom =
+ (unsigned int)((queue->base + queue->length) - queue->front);
+
+ if (SizeToBottom > size) {
+ mailbox_memcpy((void *)queue->front, (const void *)data, size);
+
+ queue->front += size;
+ } else {
+ mailbox_memcpy((void *)(queue->front), (const void *)data,
+ SizeToBottom);
+
+ mailbox_memcpy((void *)(queue->base),
+ (const void *)(data + SizeToBottom),
+ (size - SizeToBottom));
+
+ queue->front = (queue->base + size) - SizeToBottom;
+ }
+
+ return (int)size;
+}
+
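+/*
+ * mailbox_queue_read - counterpart of mailbox_queue_write(): copies
+ * @size bytes from queue->rear into @buff, wrapping at the end of the
+ * buffer, and advances rear.
+ */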
+MAILBOX_GLOBAL int mailbox_queue_read(struct mb_queue *queue,
+ char *buff, unsigned int size)
+{
+ unsigned int to_bottom;
+
+ to_bottom = (unsigned int)((queue->base + queue->length) - queue->rear);
+
+ if (to_bottom > size) {
+ mailbox_memcpy((void *)buff, (const void *)(queue->rear), size);
+
+ queue->rear += size;
+ } else {
+ mailbox_memcpy((void *)buff, (const void *)(queue->rear),
+ to_bottom);
+
+ mailbox_memcpy((void *)(buff + to_bottom),
+ (const void *)(queue->base), (size - to_bottom));
+
+ queue->rear = (queue->base + size) - to_bottom;
+ }
+
+ return (int)size;
+}
+
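+/*
+ * Sanity-check a mail header taken from the queue: verify the
+ * partition magic, report the first mail and any sequence-number gap,
+ * stamp the read time into the header and record the transfer for
+ * maintenance.
+ */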
+MAILBOX_LOCAL int mailbox_check_mail(struct mb_buff *mbuff,
+ struct mb_mail *msg_head,
+ unsigned long data_addr)
+{
+ unsigned int time_stamp;
+ unsigned int seq_num;
+ int ret_val = MAILBOX_OK;
+ if (MAILBOX_MSGHEAD_NUMBER != msg_head->ulPartition) {
+ ret_val =
+ mailbox_logerro_p1(MAILBOX_CRIT_GUT_MSG_CHECK_FAIL,
+ msg_head->ulMailCode);
+ }
+
+ seq_num = mbuff->seq_num;
+
+ if (MAILBOX_SEQNUM_START == msg_head->ulSeqNum) {
+ ret_val =
+ mailbox_logerro_p1(MAILBOX_INFO_RECEIVE_FIRST_MAIL,
+ msg_head->ulMailCode);
+ } else if ((seq_num + 1) != msg_head->ulSeqNum) {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_MAILBOX_SEQNUM_CHECK_FAIL,
+ msg_head->ulMailCode);
+ }
+
+ mbuff->seq_num = msg_head->ulSeqNum;
+
+ time_stamp = (unsigned int)mailbox_get_timestamp();
+
+ msg_head->ulReadSlice = time_stamp;
+
+ mailbox_record_transport(&(mbuff->mntn), msg_head->ulMailCode,
+ msg_head->ulWriteSlice, msg_head->ulReadSlice,
+ data_addr);
+ return ret_val;
+}
+
+MAILBOX_EXTERN struct mb *mailbox_get_mb(void)
+{
+ if (MAILBOX_INIT_MAGIC == g_mailbox_handle.init_flag) {
+ return &g_mailbox_handle;
+ }
+
+ mailbox_out(("error: mb not init\n"));
+ return MAILBOX_NULL;
+}
+
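+/*
+ * Look up the channel buffer for a mail code: use the send table when
+ * the local CPU is the source and the receive table when it is the
+ * destination, then bounds-check the carrier and user IDs against the
+ * channel configuration.
+ */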
+MAILBOX_EXTERN struct mb_buff *mailbox_get_channel_handle(struct mb *mailbox,
+ unsigned int mailcode)
+{
+ struct mb_link *link_array = MAILBOX_NULL;
+ struct mb_buff *mbuff = MAILBOX_NULL;
+ unsigned int src_id = 0;
+ unsigned int dst_id = 0;
+ unsigned int carrier_id = 0;
+
+ src_id = mailbox_get_src_id(mailcode);
+ dst_id = mailbox_get_dst_id(mailcode);
+ carrier_id = mailbox_get_carrier_id(mailcode);
+
+ if (src_id == mailbox->local_id) {
+ if (dst_id < MAILBOX_CPUID_BUTT) {
+ link_array = &mailbox->send_tbl[dst_id];
+ } else {
+ (void)
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_INVALID_CHANNEL_ID, mailcode);
+ return MAILBOX_NULL;
+ }
+ } else if (dst_id == mailbox->local_id) {
+ if (src_id < MAILBOX_CPUID_BUTT) {
+ link_array = &mailbox->recv_tbl[src_id];
+ } else {
+ (void)
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_INVALID_CHANNEL_ID, mailcode);
+ return MAILBOX_NULL;
+ }
+ }
+
+ if ((MAILBOX_NULL == link_array) || (0 == link_array->carrier_butt)) {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_CPU_LINK,
+ mailcode);
+ return MAILBOX_NULL;
+ }
+
+ if (carrier_id < link_array->carrier_butt) {
+ mbuff = &link_array->channel_buff[carrier_id];
+ } else {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_CARRIER_ID,
+ mailcode);
+ return MAILBOX_NULL;
+ }
+
+ if (mailbox_get_use_id(mailcode) >=
+ mailbox_get_use_id(mbuff->config->butt_id)) {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_USER_ID,
+ mailcode);
+
+ return MAILBOX_NULL;
+ }
+
+ return mbuff;
+}
+
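+/*
+ * Open a channel for @mailcode: take a power (DPM) reference,
+ * re-initialize the shared memory if the protect words are missing
+ * (the peer may not be powered up yet), refuse to send from interrupt
+ * context and take the channel mutex otherwise, then refresh the local
+ * front/rear pointers from the shared queue head.  The power reference
+ * is dropped again on failure.
+ */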
+MAILBOX_LOCAL int mailbox_request_channel(struct mb *mb,
+ struct mb_buff **mb_buf,
+ unsigned int mailcode)
+{
+ struct mb_head *head = MAILBOX_NULL;
+ struct mb_queue *queue = MAILBOX_NULL;
+ struct mb_buff *mbuff = MAILBOX_NULL;
+ int ret_val = MAILBOX_OK;
+
+ mailbox_dpm_device_get();
+
+ *mb_buf = MAILBOX_NULL;
+ mbuff = mailbox_get_channel_handle(mb, mailcode);
+ if (MAILBOX_NULL == mbuff) {
+ ret_val = (int)MAILBOX_ERRO;
+ goto request_erro;
+ }
+
+ head = (struct mb_head *)mbuff->config->head_addr;
+	if ((MAILBOX_PROTECT1 != head->ulProtectWord1)
+ || (MAILBOX_PROTECT2 != head->ulProtectWord2)
+ || (MAILBOX_PROTECT1 != head->ulProtectWord3)
+ || (MAILBOX_PROTECT2 != head->ulProtectWord4)) {
+		/* not fatal: HiFi may not be powered on yet when the first IPC message is sent */
+
+ mailbox_init_mem(mbuff->config);
+
+ mailbox_logerro_p1(MAILBOX_NOT_READY, mailcode);
+ }
+
+ if (mailbox_get_src_id(mailcode) == mb->local_id) {
+ if (MAILBOX_TRUE == mailbox_int_context()) {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_SEND_MAIL_IN_INT_CONTEXT,
+ mailcode);
+ goto request_erro;
+ } else {
+ if (MAILBOX_OK != mailbox_mutex_lock(&mbuff->mutex)) {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_CRIT_GUT_MUTEX_LOCK_FAILED,
+ mailcode);
+ goto request_erro;
+ }
+ }
+ }
+
+ mbuff->mailcode = mailcode;
+
+ queue = &mbuff->mail_queue;
+ queue->front = queue->base + head->ulFront * sizeof(unsigned int);
+ queue->rear = queue->base + head->ulRear * sizeof(unsigned int);
+
+ mbuff->mb = mb;
+ *mb_buf = mbuff;
+ return MAILBOX_OK;
+ request_erro:
+ mailbox_out(("###mailbox_request_channel ERR! \n"));
+ mailbox_dpm_device_put();
+ return ret_val;
+}
+
+MAILBOX_LOCAL int mailbox_release_channel(struct mb *mb, struct mb_buff *mbuff)
+{
+ unsigned int channel_id = mbuff->channel_id;
+
+ if (mb->local_id == mailbox_get_src_id(channel_id)) {
+		if (MAILBOX_TRUE != mailbox_int_context())
+			mailbox_mutex_unlock(&mbuff->mutex);
+ }
+
+ mailbox_dpm_device_put();
+ return MAILBOX_OK;
+}
+
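+/*
+ * Read one mail: pull the header out of the queue, validate it and
+ * write it back with the read time stamp filled in, invoke the
+ * registered per-user callback with the user queue positioned on the
+ * payload, then advance the mail queue rear past the (aligned) mail.
+ * Returns the number of bytes consumed.
+ */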
+MAILBOX_LOCAL int mailbox_read_mail(struct mb_buff *mbuff)
+{
+ struct mb_mail mail;
+ struct mb_cb *read_cb = MAILBOX_NULL;
+ struct mb_queue tmp_queue;
+ struct mb_queue *usr_queue = MAILBOX_NULL;
+ struct mb_queue *m_queue = MAILBOX_NULL;
+ unsigned int use_id;
+ unsigned int slice_start;
+ unsigned int to_bottom;
+ void *usr_handle;
+ void *usr_data;
+ void (*usr_func) (void *mbuf, void *handle, void *data);
+
+ m_queue = &(mbuff->mail_queue);
+ usr_queue = &(mbuff->usr_queue);
+ mailbox_memcpy((void *)usr_queue, (const void *)m_queue,
+ sizeof(struct mb_queue));
+
+ tmp_queue.base = usr_queue->base;
+ tmp_queue.length = usr_queue->length;
+ tmp_queue.front = usr_queue->rear;
+ tmp_queue.rear = usr_queue->front;
+
+ mailbox_queue_read(usr_queue, (char *)&mail, sizeof(struct mb_mail));
+
+ mailbox_check_mail(mbuff, &mail, m_queue->rear);
+
+ mailbox_queue_write(&tmp_queue, (char *)&mail, sizeof(struct mb_mail));
+ use_id = mailbox_get_use_id(mail.ulMailCode);
+
+ if (use_id >= mailbox_get_use_id(mbuff->config->butt_id)) {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_USER_ID,
+ mail.ulMailCode);
+ goto EXIT;
+ }
+
+ read_cb = &mbuff->read_cb[use_id];
+ mailbox_mutex_lock(&mbuff->mutex);
+ usr_handle = read_cb->handle;
+ usr_data = read_cb->data;
+ usr_func = read_cb->func;
+ mailbox_mutex_unlock(&mbuff->mutex);
+ if (MAILBOX_NULL != usr_func) {
+ usr_queue->size = mail.ulMsgLength;
+ slice_start = (unsigned int)mailbox_get_timestamp();
+ usr_func(mbuff, usr_handle, usr_data);
+
+ mailbox_record_receive(&mbuff->mntn, mail.ulMailCode,
+ slice_start);
+ } else {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_READ_CALLBACK_NOT_FIND,
+ mail.ulMailCode);
+ }
+
+ EXIT:
+
+ to_bottom =
+ (unsigned int)((m_queue->base + m_queue->length) - m_queue->rear);
+ if (to_bottom > (mail.ulMsgLength + sizeof(struct mb_mail))) {
+ m_queue->rear += (mail.ulMsgLength + sizeof(struct mb_mail));
+ } else {
+ m_queue->rear = m_queue->base + ((mail.ulMsgLength +
+ sizeof(struct mb_mail)) -
+ to_bottom);
+ }
+
+ m_queue->rear = mailbox_align_size(m_queue->rear, MAILBOX_ALIGN);
+
+ return (int)(mailbox_align_size(mail.ulMsgLength, MAILBOX_ALIGN)
+ + sizeof(struct mb_mail));
+
+}
+
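+/*
+ * Drain a channel: work out how much unread data sits between rear and
+ * front and call mailbox_read_mail() until it has all been consumed.
+ */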
+MAILBOX_LOCAL int mailbox_receive_all_mails(struct mb_buff *mbuf)
+{
+
+ struct mb_queue *queue;
+ signed int size_left;
+ unsigned int mail_len = 0;
+ int ret_val = MAILBOX_OK;
+
+ queue = &(mbuf->mail_queue);
+
+ if (queue->front >= queue->rear) {
+ size_left = (signed int)(queue->front - queue->rear);
+ } else {
+ size_left = (signed int)((queue->length) - ((queue->rear)
+ - (queue->front)));
+ }
+
+ while (size_left > 0) {
+ mail_len = (unsigned int)mailbox_read_mail(mbuf);
+ if (0 == mail_len) {
+ ret_val =
+ mailbox_logerro_p1(MAILBOX_CRIT_GUT_READ_MAIL,
+ mbuf->channel_id);
+ }
+ size_left -= (signed int)(mail_len);
+ }
+
+ if (size_left < 0) {
+ return mailbox_logerro_p1(MAILBOX_CRIT_GUT_RECEIVE_MAIL,
+ mbuf->channel_id);
+ }
+ return ret_val;
+}
+
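+/*
+ * Receive entry point registered through mailbox_process_register():
+ * checks that this CPU is the destination, opens the channel, drains
+ * every pending mail and publishes the updated rear pointer before
+ * releasing the channel.
+ */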
+MAILBOX_LOCAL int mailbox_read_channel(unsigned int channel_id)
+{
+ struct mb_buff *mbuf = MAILBOX_NULL;
+ struct mb *mb = MAILBOX_NULL;
+ int ret_val = MAILBOX_OK;
+
+ mb = mailbox_get_mb();
+
+ if (MAILBOX_NULL == mb) {
+ return (int)MAILBOX_ERRO;
+ }
+
+ if (mb->local_id != mailbox_get_dst_id(channel_id)) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_TARGET_CPU,
+ channel_id);
+ }
+
+ ret_val = mailbox_request_channel(mb, &mbuf, channel_id);
+ if (MAILBOX_OK != ret_val) {
+ return ret_val;
+ }
+
+ ret_val = mailbox_receive_all_mails(mbuf);
+
+ mailbox_flush_buff(mbuf);
+
+ if (MAILBOX_OK != mailbox_release_channel(mb, mbuf)) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_RELEASE_CHANNEL,
+ channel_id);
+ }
+
+ return ret_val;
+}
+
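+/*
+ * Reset the shared memory of one channel: clear the data area, write
+ * the protect words at both ends of the buffer and re-initialize the
+ * queue head (front/rear indexes, slices and head protect words).
+ */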
+MAILBOX_LOCAL int mailbox_init_mem(struct mb_cfg *config)
+{
+ struct mb_head *head = (struct mb_head *)config->head_addr;
+ unsigned long data_addr = config->data_addr;
+ unsigned int data_size = config->data_size;
+
+ if ((0 == data_addr) || (MAILBOX_NULL == head) || (0 == data_size)) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INIT_CORESHARE_MEM,
+ config->butt_id);
+ }
+
+ mailbox_memset((signed char *)data_addr, 0, data_size);
+
+ mailbox_write_reg(data_addr, MAILBOX_PROTECT1);
+ mailbox_write_reg(data_addr + MAILBOX_PROTECT_LEN, MAILBOX_PROTECT2);
+
+ mailbox_write_reg((data_addr + data_size) - (2 * MAILBOX_PROTECT_LEN),
+ MAILBOX_PROTECT1);
+ mailbox_write_reg((data_addr + data_size) - MAILBOX_PROTECT_LEN,
+ MAILBOX_PROTECT2);
+
+ head->ulFront = 0;
+ head->ulFrontslice = 0;
+ head->ulRear = head->ulFront;
+ head->ulRearslice = 0;
+ head->ulProtectWord4 = MAILBOX_PROTECT2;
+ head->ulProtectWord3 = MAILBOX_PROTECT1;
+ head->ulProtectWord2 = MAILBOX_PROTECT2;
+ head->ulProtectWord1 = MAILBOX_PROTECT1;
+
+ return MAILBOX_OK;
+}
+
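+/*
+ * First pass over the channel configuration table: for every channel
+ * whose source or destination is this CPU, record the highest carrier
+ * index per remote CPU so the channel buffer pool can be partitioned
+ * in mailbox_init_all_handle().
+ */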
+MAILBOX_LOCAL int mailbox_calculate_space(struct mb *mb,
+ struct mb_cfg *config,
+ unsigned int cpu_id)
+{
+ struct mb_link *send_link = MAILBOX_NULL;
+ struct mb_link *recev_link = MAILBOX_NULL;
+ int ret_val = MAILBOX_OK;
+ unsigned int src_id = 0;
+ unsigned int dst_id = 0;
+ unsigned int carrier_id = 0;
+
+ send_link = mb->send_tbl;
+ recev_link = mb->recv_tbl;
+
+ while (MAILBOX_MAILCODE_INVALID != config->butt_id) {
+ src_id = mailbox_get_src_id(config->butt_id);
+ dst_id = mailbox_get_dst_id(config->butt_id);
+ carrier_id = mailbox_get_carrier_id(config->butt_id);
+
+ if (cpu_id == src_id) {
+ if (dst_id < MAILBOX_CPUID_BUTT) {
+ if ((carrier_id + 1) >
+ send_link[dst_id].carrier_butt) {
+ send_link[dst_id].carrier_butt =
+ (carrier_id + 1);
+ }
+ } else {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_INVALID_CHANNEL_ID,
+ config->butt_id);
+ }
+ }
+ if (cpu_id == dst_id) {
+ if (src_id < MAILBOX_CPUID_BUTT) {
+ if ((carrier_id + 1) >
+ recev_link[src_id].carrier_butt) {
+ recev_link[src_id].carrier_butt =
+ (carrier_id + 1);
+ }
+ } else {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_INVALID_CHANNEL_ID,
+ config->butt_id);
+ }
+ }
+
+ config++;
+ }
+
+ return ret_val;
+}
+
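+/*
+ * Second pass over the configuration: hand out channel buffers from
+ * g_mailbox_channel_handle_pool and callback slots from
+ * g_mailbox_user_cb_pool, register the receive handler and initialize
+ * shared memory for receiving channels, and fill in each buffer's
+ * queue geometry and channel registration.
+ */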
+MAILBOX_LOCAL int mailbox_init_all_handle(struct mb *mb,
+ struct mb_cfg *config,
+ unsigned int cpu_id)
+{
+ struct mb_queue *queue = MAILBOX_NULL;
+ unsigned int direct = MIALBOX_DIRECTION_INVALID;
+ int ret_val = MAILBOX_OK;
+ struct mb_link *send_link = MAILBOX_NULL;
+ struct mb_link *recv_link = MAILBOX_NULL;
+ struct mb_buff *mbuf_prob = &g_mailbox_channel_handle_pool[0];
+ struct mb_cb *cb_prob = &g_mailbox_user_cb_pool[0];
+ struct mb_buff *mbuf_cur = MAILBOX_NULL;
+ unsigned int channel_sum = 0;
+ unsigned int use_sum = 0;
+ unsigned int src_id = 0;
+ unsigned int dst_id = 0;
+ unsigned int carrier_id = 0;
+ unsigned int use_max = 0;
+
+ send_link = mb->send_tbl;
+ recv_link = mb->recv_tbl;
+
+ while (MAILBOX_MAILCODE_INVALID != config->butt_id) {
+ direct = MIALBOX_DIRECTION_INVALID;
+ src_id = mailbox_get_src_id(config->butt_id);
+ dst_id = mailbox_get_dst_id(config->butt_id);
+ carrier_id = mailbox_get_carrier_id(config->butt_id);
+ use_max = mailbox_get_use_id(config->butt_id);
+
+ printk
+ ("******************func = %s line = %d src_id = %d dst_id = %d******************\n",
+ __func__, __LINE__, src_id, dst_id);
+
+ if (cpu_id == src_id) {
+ direct = MIALBOX_DIRECTION_SEND;
+
+ if (MAILBOX_NULL == send_link[dst_id].channel_buff) {
+ send_link[dst_id].channel_buff = mbuf_prob;
+ mbuf_prob += (send_link[dst_id].carrier_butt);
+ channel_sum += (send_link[dst_id].carrier_butt);
+ if (channel_sum > MAILBOX_CHANNEL_NUM) {
+ return
+ mailbox_logerro_p1
+ (MAILBOX_CRIT_GUT_INIT_CHANNEL_POOL_TOO_SMALL,
+ channel_sum);
+ }
+ }
+ mbuf_cur = &send_link[dst_id].channel_buff[carrier_id];
+ }
+
+ if (cpu_id == dst_id) {
+ direct = MIALBOX_DIRECTION_RECEIVE;
+
+ if (MAILBOX_NULL == recv_link[src_id].channel_buff) {
+ recv_link[src_id].channel_buff = mbuf_prob;
+ mbuf_prob += (recv_link[src_id].carrier_butt);
+ channel_sum += (recv_link[src_id].carrier_butt);
+ if (channel_sum > MAILBOX_CHANNEL_NUM) {
+ return
+ mailbox_logerro_p1
+ (MAILBOX_CRIT_GUT_INIT_CHANNEL_POOL_TOO_SMALL,
+ channel_sum);
+ }
+ }
+
+ mbuf_cur = &recv_link[src_id].channel_buff[carrier_id];
+
+ mbuf_cur->read_cb = cb_prob;
+ cb_prob += use_max;
+ use_sum += use_max;
+ if (use_sum > MAILBOX_USER_NUM) {
+ return
+ mailbox_logerro_p1
+ (MAILBOX_CRIT_GUT_INIT_USER_POOL_TOO_SMALL,
+ use_sum);
+ }
+
+ ret_val =
+ mailbox_process_register(mailbox_get_channel_id
+ (config->butt_id),
+ mailbox_read_channel,
+ mbuf_cur);
+
+ ret_val |= mailbox_init_mem(config);
+
+ }
+
+ if ((MIALBOX_DIRECTION_INVALID != direct)
+ && (MAILBOX_NULL != mbuf_cur)) {
+ mbuf_cur->channel_id =
+ mailbox_get_channel_id(config->butt_id);
+ mbuf_cur->seq_num = MAILBOX_SEQNUM_START;
+ mbuf_cur->mntn.peak_traffic_left =
+ MAILBOX_QUEUE_LEFT_INVALID;
+ mbuf_cur->mntn.mbuff = mbuf_cur;
+ mbuf_cur->config = config;
+
+ queue = &(mbuf_cur->mail_queue);
+
+ queue->length = mbuf_cur->config->data_size -
+ (MAILBOX_DATA_LEN_PROTECT_NUM *
+ MAILBOX_PROTECT_LEN);
+ queue->base =
+ mbuf_cur->config->data_addr +
+ (MAILBOX_DATA_BASE_PROTECT_NUM *
+ MAILBOX_PROTECT_LEN);
+
+ ret_val =
+ mailbox_channel_register(mailbox_get_channel_id
+ (config->butt_id),
+ config->int_src,
+ mailbox_get_dst_id(config->
+ butt_id),
+ direct, &mbuf_cur->mutex);
+ }
+
+ config++;
+ }
+
+ /*g_mailbox_global_cfg_tbl[];
+ g_mailbox_channel_handle_pool[MAILBOX_CHANNEL_NUM];
+ g_mailbox_user_cb_pool[MAILBOX_USER_NUM];
+ */
+ if ((unsigned int)MAILBOX_CHANNEL_NUM != channel_sum) {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_INIT_CHANNEL_POOL_TOO_LARGE,
+ ((MAILBOX_CHANNEL_NUM << 16) | channel_sum));
+ }
+
+ if (MAILBOX_USER_NUM != use_sum) {
+ ret_val =
+ mailbox_logerro_p1(MAILBOX_ERR_GUT_INIT_USER_POOL_TOO_LARGE,
+ use_sum);
+ }
+
+ return ret_val;
+}
+
+MAILBOX_LOCAL int mailbox_create_box(struct mb *mb,
+ struct mb_cfg *config, unsigned int cpu_id)
+{
+ mb->local_id = cpu_id;
+
+ printk("<<<<<<<<<<<<<<mailbox_create_box cpu_id = %d>>>>>>>>>>>>>>\n",
+ cpu_id);
+
+ if (MAILBOX_OK != mailbox_calculate_space(mb, config, cpu_id)) {
+ return mailbox_logerro_p0(MAILBOX_ERR_GUT_CALCULATE_SPACE);
+ }
+
+ return mailbox_init_all_handle(mb, config, cpu_id);
+}
+
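+/*
+ * Map a physically contiguous range into kernel virtual space with
+ * vmap() and the supplied page protection.  The temporary page array
+ * is only needed while building the mapping and is freed before
+ * returning.
+ */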
+void *mem_remap_type(unsigned long phys_addr, size_t size, pgprot_t pgprot)
+{
+ int i;
+ u8 *vaddr;
+ int npages =
+ PAGE_ALIGN((phys_addr & (PAGE_SIZE - 1)) + size) >> PAGE_SHIFT;
+ unsigned long offset = phys_addr & (PAGE_SIZE - 1);
+ struct page **pages;
+ pages = vmalloc(sizeof(struct page *) * npages);
+ if (!pages) {
+		printk(KERN_ERR "%s: vmalloc returned NULL!\n", __FUNCTION__);
+ return NULL;
+ }
+ pages[0] = phys_to_page(phys_addr);
+ for (i = 0; i < npages - 1; i++) {
+ pages[i + 1] = pages[i] + 1;
+ }
+ vaddr = (u8 *) vmap(pages, npages, VM_MAP, pgprot);
+	if (vaddr == NULL) {
+		printk(KERN_ERR "%s: vmap returned NULL!\n", __FUNCTION__);
+ } else {
+ vaddr += offset;
+ }
+ vfree(pages);
+ printk(KERN_DEBUG
+ "%s: phys_addr:0x%08lx size:0x%08lx npages:%d vaddr:%pK offset:0x%08lx\n",
+ __FUNCTION__, phys_addr, (unsigned long)size, npages, vaddr,
+ offset);
+ return (void *)vaddr;
+}
+
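+/*
+ * One-time mailbox initialization: remap the shared mailbox memory
+ * (write-combined), rebase each channel's head/data address by the
+ * physical-to-virtual offset and clear the queue heads, check that the
+ * configured layout fits the region, create the local mailbox, bring
+ * up the platform layer and map the slice (timestamp) register.
+ */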
+MAILBOX_GLOBAL int mailbox_init(void)
+{
+ unsigned long offset = 0;
+ int i = 0;
+ struct mb_head *head = NULL;
+
+ if (MAILBOX_INIT_MAGIC == g_mailbox_handle.init_flag) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_ALREADY_INIT,
+ g_mailbox_handle.init_flag);
+ }
+
+ printk("func = %s, line = %d baseaddr = 0x%x\n", __func__, __LINE__,
+ (unsigned int)MAILBOX_MEM_BASEADDR);
+
+ g_shareMemBase =
+ mem_remap_nocache(MAILBOX_MEM_BASEADDR, MAILBOX_MEM_LENGTH);
+
+ if (NULL == g_shareMemBase) {
+		printk("func = %s, failed to remap mailbox memory!\n", __func__);
+ return mailbox_logerro_p0(MAILBOX_CRIT_GUT_MEMORY_CONFIG);
+ }
+
+ offset = (unsigned long)g_shareMemBase - MAILBOX_MEM_BASEADDR;
+ for (i = 0; i < MAILBOX_GLOBAL_CHANNEL_NUM; i++) {
+ g_mailbox_global_cfg_tbl[i].head_addr =
+ g_mailbox_global_cfg_tbl[i].head_addr + offset;
+ g_mailbox_global_cfg_tbl[i].data_addr =
+ g_mailbox_global_cfg_tbl[i].data_addr + offset;
+
+ printk("i = %d, head_addr = 0x%lx, data_addr = 0x%lx\n", i,
+ g_mailbox_global_cfg_tbl[i].head_addr,
+ g_mailbox_global_cfg_tbl[i].data_addr);
+
+ head = (struct mb_head *)g_mailbox_global_cfg_tbl[i].head_addr;
+ head->ulFront = 0x0;
+ head->ulRear = 0x0;
+ head->ulFrontslice = 0x0;
+ head->ulRearslice = 0x0;
+ }
+
+ mailbox_memset(&g_mailbox_handle, 0x00, sizeof(struct mb));
+
+ if ((MAILBOX_HEAD_BOTTOM_ADDR >
+ (MAILBOX_MEM_BASEADDR + MAILBOX_MEM_HEAD_LEN))
+ || (MAILBOX_MEMORY_BOTTOM_ADDR >
+ (MAILBOX_MEM_BASEADDR + MAILBOX_MEM_LENGTH))) {
+		mailbox_out(("mailbox address overflow: headbottom valid(0x%lx), config(0x%x)!\n\
+		\r databottom valid(0x%x), config(0x%x)!" RT,
+			     (MAILBOX_MEM_BASEADDR + MAILBOX_MEM_HEAD_LEN), (unsigned int)MAILBOX_HEAD_BOTTOM_ADDR, (unsigned int)(MAILBOX_MEM_BASEADDR + MAILBOX_MEM_LENGTH),
+			     (unsigned int)MAILBOX_MEMORY_BOTTOM_ADDR));
+ return mailbox_logerro_p0(MAILBOX_CRIT_GUT_MEMORY_CONFIG);
+ }
+
+ if (MAILBOX_OK != mailbox_create_box(&g_mailbox_handle,
+ &g_mailbox_global_cfg_tbl[0],
+ MAILBOX_LOCAL_CPUID)) {
+ return mailbox_logerro_p0(MAILBOX_ERR_GUT_CREATE_BOX);
+ }
+
+ if (MAILBOX_OK != mailbox_init_platform()) {
+ return mailbox_logerro_p0(MAILBOX_ERR_GUT_INIT_PLATFORM);
+ }
+
+ g_mailbox_handle.init_flag = MAILBOX_INIT_MAGIC;
+
+/*mailbox_ifc_test_init();*/
+
+/* FIXME: the slice register mapping below is never unmapped */
+ g_slice_reg = (void *)ioremap(SLICE_REG, 0x4);
+ if (NULL == g_slice_reg) {
+ printk("ioremap of slice reg fail.\n");
+ }
+
+ mailbox_out(("mb init OK!\n"));
+
+ return MAILBOX_OK;
+}
+
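+/*
+ * Install (or replace) the read callback for one user ID on a channel
+ * received by this CPU; the callback pointer, handle and data are
+ * updated under the channel mutex.
+ */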
+MAILBOX_EXTERN int mailbox_register_cb(unsigned int mailcode,
+ void (*cb) (void *mbuf, void *handle,
+ void *data),
+ void *usr_handle, void *usr_data)
+{
+ struct mb_cb *read_cb = MAILBOX_NULL;
+ struct mb_buff *mbuf = MAILBOX_NULL;
+ struct mb *mb = MAILBOX_NULL;
+ unsigned int dst_id;
+
+ mb = &g_mailbox_handle;
+
+ dst_id = mailbox_get_dst_id(mailcode);
+
+ printk("mb->local_id = %d dst_id = %d\n", mb->local_id, dst_id);
+
+ if (mb->local_id != dst_id) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_TARGET_CPU,
+ mailcode);
+ }
+
+ mbuf = mailbox_get_channel_handle(mb, mailcode);
+ if (MAILBOX_NULL == mbuf) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_CHANNEL_ID,
+ mailcode);
+ }
+
+ read_cb = &mbuf->read_cb[mailbox_get_use_id(mailcode)];
+ if (MAILBOX_NULL != read_cb->func) {
+ /*mailbox_logerro_p1(MAILBOX_WARNING_USER_CALLBACK_ALREADY_EXIST, mailcode); */
+ }
+
+ mailbox_mutex_lock(&mbuf->mutex);
+ read_cb->handle = usr_handle;
+ read_cb->data = usr_data;
+ read_cb->func = cb;
+ mailbox_mutex_unlock(&mbuf->mutex);
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN int mailbox_init_usr_queue(struct mb_buff *mb_buf)
+{
+ unsigned int hsize = sizeof(struct mb_mail);
+ struct mb_queue *usrqu = &mb_buf->usr_queue;
+
+ mailbox_memcpy((void *)usrqu, (const void *)&mb_buf->mail_queue,
+ sizeof(struct mb_queue));
+ if (hsize >
+ mailbox_queue_left(usrqu->rear, usrqu->front, usrqu->length)) {
+ return mailbox_logerro_p1(MAILBOX_FULL, mb_buf->mailcode);
+ }
+
+ if (hsize + usrqu->front >= usrqu->base + usrqu->length) {
+ usrqu->front = usrqu->front + hsize - usrqu->length;
+ } else {
+ usrqu->front = usrqu->front + hsize;
+ }
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN int mailbox_request_buff(unsigned int mailcode, void *mb_buf)
+{
+ struct mb *mailbox = MAILBOX_NULL;
+ int ret_val = MAILBOX_OK;
+ struct mb_buff **mbuf = (struct mb_buff **)mb_buf;
+
+ mailbox = mailbox_get_mb();
+ *mbuf = MAILBOX_NULL;
+ if (MAILBOX_NULL == mailbox) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INPUT_PARAMETER,
+ mailcode);
+ }
+
+ if (mailbox->local_id != mailbox_get_src_id(mailcode)) {
+ return mailbox_logerro_p1(MAILBOX_ERR_GUT_INVALID_SRC_CPU,
+ mailcode);
+ }
+
+ ret_val = mailbox_request_channel(mailbox, mbuf, mailcode);
+ if (MAILBOX_OK == ret_val) {
+ (*mbuf)->mb = mailbox;
+ ret_val = mailbox_init_usr_queue(*mbuf);
+ if (MAILBOX_OK == ret_val) {
+ return MAILBOX_OK;
+ } else {
+ mailbox_release_channel(mailbox, *mbuf);
+ *mbuf = MAILBOX_NULL;
+ return ret_val;
+ }
+ } else {
+ return ret_val;
+ }
+}
+
+MAILBOX_EXTERN int mailbox_write_buff(struct mb_queue *queue,
+ char *data, unsigned int size)
+{
+ if ((size <=
+ mailbox_queue_left(queue->rear, queue->front, queue->length))
+ && (size + sizeof(struct mb_mail) <=
+ mailbox_queue_left(queue->rear, queue->front, queue->length))
+ ) {
+ return mailbox_queue_write(queue, (char *)data, size);
+ }
+ return 0;
+}
+
+MAILBOX_EXTERN int mailbox_read_buff(struct mb_queue *queue,
+ char *data, unsigned int size)
+{
+ return mailbox_queue_read(queue, (char *)data, size);
+}
+
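+/*
+ * Finalize a mail whose payload has already been written through the
+ * user queue: build the mail header (magic, write slice, sequence
+ * number, length), write it at the saved queue front ahead of the
+ * payload, advance the mail queue front to the aligned end of the
+ * payload, publish it with mailbox_flush_buff(), record the send and
+ * re-arm the user queue for the next mail.
+ */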
+MAILBOX_EXTERN int mailbox_sealup_buff(struct mb_buff *mb_buf,
+ unsigned int usr_size)
+{
+
+ struct mb_mail mail = { 0 };
+ struct mb_queue *m_queue = MAILBOX_NULL;
+ unsigned int time_stamp;
+ unsigned long mail_addr;
+
+ m_queue = &mb_buf->mail_queue;
+
+	if (usr_size > (mb_buf->config->single_max - sizeof(struct mb_mail))) {
+		return (int)
+		    mailbox_logerro_p2(MAILBOX_ERR_GUT_WRITE_EXCEED_MAX_SIZE,
+				       usr_size, mb_buf->mailcode);
+	}
+
+ time_stamp = (unsigned int)mailbox_get_timestamp();
+ mail.ulPartition = MAILBOX_MSGHEAD_NUMBER;
+ mail.ulWriteSlice = time_stamp;
+ mail.ulReadSlice = 0;
+ mail.ulPriority = 0;
+ mail.ulSeqNum = mb_buf->seq_num;
+ mail.ulMsgLength = usr_size;
+ mail.ulMailCode = mb_buf->mailcode;
+
+ if (MAILBOX_SEQNUM_START == mb_buf->seq_num) {
+ (void)mailbox_logerro_p1(MAILBOX_INFO_SEND_FIRST_MAIL,
+ mb_buf->mailcode);
+ }
+
+ mb_buf->seq_num++;
+
+ mail_addr = m_queue->front;
+ mailbox_queue_write(m_queue, (char *)(&mail), sizeof(struct mb_mail));
+
+ m_queue->front =
+ mailbox_align_size(mb_buf->usr_queue.front, MAILBOX_ALIGN);
+
+ mailbox_flush_buff(mb_buf);
+
+ mailbox_record_send(&(mb_buf->mntn), mb_buf->mailcode, time_stamp,
+ mail_addr);
+
+ return mailbox_init_usr_queue(mb_buf);
+}
+
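+/*
+ * Publish the local queue pointer to the shared queue head: the sender
+ * updates ulFront, the receiver updates ulRear, each stamped with the
+ * current slice (timestamp).
+ */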
+MAILBOX_EXTERN int mailbox_flush_buff(struct mb_buff *mbuff)
+{
+ struct mb_head *head = MAILBOX_NULL;
+ struct mb_queue *queue = MAILBOX_NULL;
+ unsigned int channel_id = mbuff->channel_id;
+ struct mb *mb = mbuff->mb;
+ head = (struct mb_head *)mbuff->config->head_addr;
+ queue = &mbuff->mail_queue;
+
+ if (mb->local_id == mailbox_get_src_id(channel_id)) {
+ head->ulFront =
+ (unsigned int)(queue->front -
+ queue->base) / sizeof(unsigned int);
+ head->ulFrontslice = (unsigned int)mailbox_get_timestamp();
+
+ } else if (mb->local_id == mailbox_get_dst_id(channel_id)) {
+ head->ulRear =
+ (unsigned int)(queue->rear -
+ queue->base) / sizeof(unsigned int);
+ head->ulRearslice = (unsigned int)mailbox_get_timestamp();
+ }
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN int mailbox_send_buff(struct mb_buff *mbuf)
+{
+ return mailbox_delivery(mbuf->channel_id);
+}
+
+MAILBOX_EXTERN int mailbox_release_buff(struct mb_buff *mbuf)
+{
+ return mailbox_release_channel(mbuf->mb, mbuf);
+}
+
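+/*
+ * The two helpers below translate between the remapped kernel virtual
+ * addresses and the physical shared-memory addresses exchanged in mail
+ * headers, using the remap offset; they map the region on demand if
+ * mailbox_init() has not run yet.
+ */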
+MAILBOX_EXTERN unsigned long mailbox_virt_to_phy(unsigned long virt_addr)
+{
+ unsigned long offset = 0;
+ if (NULL == g_shareMemBase) {
+ g_shareMemBase =
+ ioremap(MAILBOX_MEM_BASEADDR, MAILBOX_MEM_LENGTH);
+ }
+ offset = (unsigned long)g_shareMemBase - MAILBOX_MEM_BASEADDR;
+
+ return (virt_addr - offset);
+}
+
+MAILBOX_EXTERN unsigned long mailbox_phy_to_virt(unsigned long phy_addr)
+{
+ unsigned long offset = 0;
+ if (NULL == g_shareMemBase) {
+ g_shareMemBase =
+ ioremap(MAILBOX_MEM_BASEADDR, MAILBOX_MEM_LENGTH);
+ }
+ offset = (unsigned long)g_shareMemBase - MAILBOX_MEM_BASEADDR;
+
+ return (phy_addr + offset);
+}
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.h
new file mode 100644
index 000000000000..bbafa30942d1
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_gut.h
@@ -0,0 +1,170 @@
+#ifndef __DRV_MAILBOX_GUT_H__
+#define __DRV_MAILBOX_GUT_H__
+
+#include "drv_mailbox_platform.h"
+#include "drv_mailbox_debug.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define MAILBOX_GLOBAL_CHANNEL_NUM (4)
+
+#define MAILBOX_SINGLE_ID_MASK (0xFF)
+#define mailbox_get_channel_id(id) ((id) & (~MAILBOX_SINGLE_ID_MASK))
+
+#define mailbox_get_src_id(id) (unsigned char)(((id) >> MAILBOX_ID_SRC_CPU_OFFSET) & MAILBOX_SINGLE_ID_MASK)
+
+#define mailbox_get_dst_id(id) (unsigned char)(((id) >> MAILBOX_ID_DST_CPU_OFFSET) & MAILBOX_SINGLE_ID_MASK)
+
+#define mailbox_get_carrier_id(id) (unsigned char)(((id) >> MAILBOX_ID_CHANNEL_OFFSET) & MAILBOX_SINGLE_ID_MASK)
+
+#define mailbox_get_use_id(id) (unsigned char)((id) & MAILBOX_SINGLE_ID_MASK)
+
+#define MAILBOX_INIT_MAGIC (0x87654312)
+
+#define MAILBOX_ALIGN sizeof(unsigned int)
+#define mailbox_align_size(a, p) (((a)+((p)-1)) & ~((p)-1))
+
+#define mailbox_write_reg(Addr, Value) (*((volatile unsigned int *)(Addr)) = Value)
+
+#define MAILBOX_CPU_NUM (MAILBOX_CPUID_BUTT)
+
+#define MAILBOX_CHANNEL_BUTT(src, dst) (MAILBOX_CHANNEL_##src##2##dst##_BUTT & 0xff) /* & 0xff silences pclint */
+
+#define MAILBOX_USER_BUTT(src, dst, channel) \
+ ((unsigned int)MAILBOX_MAILCODE_ITEM_END(src, dst, channel) & 0xff)
+
+#define MAILBOX_CHANNEL_COMPOSE(src, dst, ch) \
+ /*channel*/ \
+ {(unsigned int)MAILBOX_MAILCODE_ITEM_END(src, dst, ch), \
+ /*DataSize*/ \
+ (unsigned int)MAILBOX_QUEUE_SIZE(src, dst, ch), \
+ /*SingleMax*/ \
+ (unsigned int)MAILBOX_MAILSIZE_MAX(src, dst, ch), \
+ /*HeadAddr*/ \
+ (unsigned long)MAILBOX_HEAD_ADDR(src, dst, ch), \
+ /*DataAddr*/ \
+ (unsigned long)MAILBOX_QUEUE_ADDR(src, dst, ch),\
+ /*IPC INT NUMBER*/ \
+ (unsigned int)MAILBOX_IPC_INT_NUM(src, dst, ch)}
+
+#define MAILBOX_USER_QUEUE(mbuf) (&((struct mb_buff *)(mbuf))->usr_queue)
+
+#define mailbox_queue_left(Rear, Front, Length) \
+ (((Rear) > (Front)) ? (((Rear) - (Front)) - sizeof(unsigned int)) : \
+ (((Length) - ((Front) - (Rear))) - sizeof(unsigned int)))
+
+ typedef struct mb_queue {
+ unsigned long base;
+ unsigned int length;
+ unsigned long front;
+ unsigned long rear;
+ unsigned int size;
+ } mailbox_queue_stru;
+
+ struct mailbox_id_stru {
+ signed char UseId;
+ signed char Channel;
+ signed char DstCpu;
+ signed char SrcCpu;
+ };
+
+ struct mb_cb {
+ void (*func) (void *mbuf, void *handle, void *data);
+ void *handle;
+ void *data;
+ };
+
+ typedef struct mb_cfg {
+ unsigned int butt_id;
+ unsigned int data_size;
+ unsigned int single_max;
+ unsigned long head_addr;
+ unsigned long data_addr;
+ unsigned int int_src;
+ } MAILBOX_CHANNEL_CFG_STRU;
+
+ struct mb_buff {
+ unsigned int channel_id;
+ unsigned int mailcode;
+ unsigned int seq_num;
+ void *mutex;
+ struct mb *mb;
+ struct mb_cb *read_cb;
+ struct mb_cfg *config;
+#ifdef MAILBOX_OPEN_MNTN
+ struct mb_mntn mntn;
+#endif
+ struct mb_queue mail_queue;
+ struct mb_queue usr_queue;
+ };
+
+ typedef struct mb_link {
+ unsigned int carrier_butt;
+ struct mb_buff *channel_buff;
+ } MAILBOX_LINK_STRU;
+
+ typedef struct mb {
+ unsigned int local_id;
+ unsigned int init_flag;
+ unsigned int log_prob;
+ struct mb_log log_array[MAILBOX_ERRO_ARRAY_NUM];
+ struct mb_link send_tbl[MAILBOX_CPUID_BUTT];
+ struct mb_link recv_tbl[MAILBOX_CPUID_BUTT];
+ } MAILBOX_STRU;
+
+ extern MAILBOX_EXTERN struct mb_cfg g_mailbox_global_cfg_tbl[];
+
+ extern MAILBOX_EXTERN struct mb_buff
+ g_mailbox_channel_handle_pool[MAILBOX_CHANNEL_NUM];
+
+ extern MAILBOX_EXTERN struct mb_cb
+ g_mailbox_user_cb_pool[MAILBOX_USER_NUM];
+
+ extern MAILBOX_EXTERN struct mb g_mailbox_handle;
+ MAILBOX_EXTERN struct mb *mailbox_get_mb(void);
+
+ MAILBOX_EXTERN struct mb_buff *mailbox_get_channel_handle(struct mb *mb,
+ unsigned int
+ mailcode);
+
+ MAILBOX_EXTERN int mailbox_register_cb(unsigned int mail_code,
+ void (*cb) (void *mbuf,
+ void *handle,
+ void *data),
+ void *usr_handle,
+ void *usr_data);
+
+ MAILBOX_EXTERN int mailbox_request_buff(unsigned int mailcode,
+ void *mbuf);
+
+ MAILBOX_EXTERN int mailbox_sealup_buff(struct mb_buff *mb_buf,
+ unsigned int usr_size);
+
+ MAILBOX_EXTERN int mailbox_flush_buff(struct mb_buff *mbuff);
+
+ MAILBOX_EXTERN int mailbox_send_buff(struct mb_buff *mb_buf);
+
+ MAILBOX_EXTERN int mailbox_release_buff(struct mb_buff *mb_buf);
+
+ MAILBOX_EXTERN int mailbox_write_buff(struct mb_queue *queue,
+ char *data, unsigned int size);
+
+ MAILBOX_EXTERN int mailbox_read_buff(struct mb_queue *queue,
+ char *data, unsigned int size);
+
+ MAILBOX_EXTERN unsigned long mailbox_virt_to_phy(unsigned long
+ virt_addr);
+
+ MAILBOX_EXTERN unsigned long mailbox_phy_to_virt(unsigned long
+ phy_addr);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* end of __DRV_MAILBOX_GUT_H__ */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.c
new file mode 100644
index 000000000000..fa2fae3e054e
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.c
@@ -0,0 +1,385 @@
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_debug.h"
+#include "drv_mailbox_gut.h"
+#include "drv_mailbox_ifc.h"
+#include "drv_mailbox_msg.h"
+
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "ifc"
+/*extern int BSP_CPU_StateGet(int CpuID);*/
+int mailbox_ifc_get_rcode(unsigned int scode)
+{
+ unsigned int src_id = mailbox_get_src_id(scode);
+ unsigned int dst_id = mailbox_get_dst_id(scode);
+ /*unsigned long carrier_id = mailbox_get_carrier_id(scode); */
+
+ return (int)((dst_id << MAILBOX_ID_SRC_CPU_OFFSET)
+ | (src_id << MAILBOX_ID_DST_CPU_OFFSET)
+ /*|(carrier_id << MAILBOX_ID_CHANNEL_OFFSET) */
+ );
+}
+
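+/*
+ * Work out where the IFC payload will live in the user queue, wrapping
+ * to the queue base when the ifc_head or the payload would cross the
+ * end of the buffer, account for the padding this wastes, and fail
+ * with MAILBOX_FULL if the total does not fit.
+ */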
+int mailbox_ifc_buf_pad(struct ifc_mb *ifc_b, unsigned int datalen)
+{
+ struct mb_queue *que = (MAILBOX_USER_QUEUE(ifc_b->mbuf));
+
+ unsigned int talsize;
+
+ talsize = sizeof(struct ifc_head) + datalen;
+ /*ifc_b->head.lmsg = talsize; */
+
+ if ((que->front + sizeof(struct ifc_head)) >= que->base + que->length) {
+ /*ifc_b->head.data = que->front + sizeof(struct ifc_head) - que->length; */
+ ifc_b->data_vitrual_addr =
+ que->front + sizeof(struct ifc_head) - que->length;
+ } else {
+ if ((que->front + talsize) >= que->base + que->length) {
+ /*ifc_b->head.data = que->base; */
+ ifc_b->data_vitrual_addr = que->base;
+ talsize += (unsigned int)((que->base + que->length) -
+ (que->front +
+ sizeof(struct ifc_head)));
+ } else {
+ /*ifc_b->head.data = que->front + sizeof(struct ifc_head); */
+ ifc_b->data_vitrual_addr =
+ que->front + sizeof(struct ifc_head);
+ }
+ }
+
+ if (talsize >
+ (unsigned int)mailbox_queue_left(que->rear, que->front,
+ que->length)) {
+
+ return mailbox_logerro_p1(MAILBOX_FULL, datalen);
+ }
+
+ ifc_b->total_size = talsize;
+ ifc_b->data_size = datalen;
+ return MAILBOX_OK;
+}
+
+int mailbox_ifc_send_buf(struct ifc_mb *ifc_b)
+{
+ int ret_val = 0;
+ unsigned long ifc_data = 0;
+ struct mb_queue *que = (MAILBOX_USER_QUEUE(ifc_b->mbuf));
+
+ ifc_data = ifc_b->data_vitrual_addr;
+
+ ifc_b->head.data_phy_addr =
+ (unsigned int)mailbox_virt_to_phy(ifc_b->data_vitrual_addr);
+
+ mailbox_write_buff(que, (char *)&ifc_b->head, sizeof(struct ifc_head));
+
+ que->front = ifc_data + ifc_b->data_size;
+
+ if (MAILBOX_OK == mailbox_sealup_buff(ifc_b->mbuf, ifc_b->total_size)) {
+ ret_val = mailbox_send_buff(ifc_b->mbuf);
+ } else {
+ ret_val = (int)MAILBOX_ERRO;
+ }
+
+ return ret_val;
+}
+
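+/*
+ * Reply handler registered by mailbox_ifc_send_wait(): match the
+ * response by time stamp, copy the OUT/BI argument data back into the
+ * caller's buffers and wake up the waiter.
+ */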
+void mailbox_ifc_waiter(void *mbuf, void *usrhandle, void *data)
+{
+ struct ifc_mb *ifc_b = (struct ifc_mb *)usrhandle;
+ struct mb_queue *queue = MAILBOX_USER_QUEUE(mbuf);
+ struct ifc_head head_r;
+
+ int i;
+
+ mailbox_read_buff(queue, (char *)&head_r, sizeof(struct ifc_head));
+
+ if (head_r.stamp == ifc_b->head.stamp) {
+ for (i = 0; i < IFC_MAX_ARG; i++) {
+ if (ifc_b->argo[i].addr) {
+ mailbox_memcpy((void *)(unsigned long)ifc_b->
+ argo[i].addr,
+ (const void *)
+ mailbox_phy_to_virt((unsigned
+ long)
+ (head_r.
+ argv[i])),
+ (ifc_b->argo[i].size));
+ }
+ }
+
+ ifc_b->head.retval = head_r.retval;
+ mailbox_complete(&ifc_b->waitsem);
+ }
+}
+
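+/*
+ * Synchronous IFC call: register the reply handler on the response
+ * code, send the request and block on a completion until the peer
+ * answers or the timeout expires (in which case the handler is
+ * unregistered and MAILBOX_TIME_OUT is returned).
+ */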
+int mailbox_ifc_send_wait(struct ifc_mb *ifc_b, unsigned int timeout)
+{
+ unsigned int rcode = ifc_b->head.rcode;
+
+ if (MAILBOX_OK != mailbox_register_cb(rcode, mailbox_ifc_waiter, ifc_b,
+ MAILBOX_NULL)) {
+ goto wait_exit;
+ }
+
+ ifc_b->waitsem = mailbox_init_completion();
+ if (MAILBOX_NULL == ifc_b->waitsem) {
+ goto wait_exit;
+ }
+ if (MAILBOX_OK != mailbox_ifc_send_buf(ifc_b)) {
+ goto wait_exit;
+ }
+
+ if (MAILBOX_OK != mailbox_wait_completion(&ifc_b->waitsem, timeout)) {
+ ifc_b->head.retval = MAILBOX_TIME_OUT;
+
+ mailbox_register_cb(rcode, MAILBOX_NULL, MAILBOX_NULL,
+ MAILBOX_NULL);
+ }
+
+ wait_exit:
+ mailbox_del_completion(&ifc_b->waitsem);
+ mailbox_release_buff(ifc_b->mbuf);
+
+ return (int)(ifc_b->head.retval);
+}
+
+int mailbox_ifc_send_no_wait(struct ifc_mb *ifc_b)
+{
+ int ret = mailbox_ifc_send_buf(ifc_b);
+
+ ret |= mailbox_release_buff(ifc_b->mbuf);
+
+ return ret;
+}
+
+int mailbox_ifc_discard_buf(struct ifc_mb *ifc_b)
+{
+ int ret = mailbox_release_buff(ifc_b->mbuf);
+ return ret;
+}
+
+void mailbox_ifc_executer(void *mbuf, void *handle, void *data)
+{
+ int (*ife) (struct ifc_head *ifc_h) = handle;
+ struct ifc_head head;
+
+ struct mb_queue *queue = MAILBOX_USER_QUEUE(mbuf);
+ (void)mailbox_read_buff(queue, (char *)&head, sizeof(struct ifc_head));
+
+ /*head.data = mailbox_phy_to_virt(head.data); */
+ if (ife) {
+ ife(&head);
+ }
+}
+
+int mailbox_ifc_register_exec(unsigned int mailcode,
+ int (*ife_cb) (struct ifc_head *ifc_h)
+ )
+{
+ return mailbox_register_cb(mailcode, mailbox_ifc_executer,
+ (void *)ife_cb,
+ (void *)(unsigned long)mailcode);
+}
+
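+/*
+ * Set up an ifc_mb for a call (IFC_LAUNCH) or a reply (IFC_RESPONSE):
+ * fill in the header, check that the target CPU is reported up, and
+ * retry the buffer allocation through mailbox_scene_delay() while the
+ * channel is full.
+ */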
+int mailbox_ifc_init_buf(struct ifc_mb *ifc_b,
+ unsigned int mailcode,
+ unsigned int direct,
+ unsigned int inlen, unsigned int timeout)
+{
+ int ret_val = MAILBOX_OK;
+ int try_times = 0;
+ unsigned int go_on = MAILBOX_FALSE;
+
+ mailbox_memset(ifc_b, 0x00, sizeof(struct ifc_mb));
+ ifc_b->head.scode = mailcode;
+ ifc_b->head.retval = MAILBOX_ERRO;
+
+ ret_val = (int)BSP_CPU_StateGet(mailbox_get_dst_id(mailcode));
+ if (!ret_val) {
+ return (int)MAILBOX_TARGET_NOT_READY;
+ }
+
+ if (timeout) {
+ ifc_b->head.needret = MAILBOX_TRUE;
+ }
+
+ if (IFC_LAUNCH == direct) {
+ ifc_b->head.rcode =
+ (unsigned int)mailbox_ifc_get_rcode(mailcode);
+ ifc_b->head.stamp = (unsigned int)mailbox_get_timestamp();
+ } else if (IFC_RESPONSE == direct) {
+ ifc_b->head.rcode = 0;
+ }
+
+ do {
+ ret_val = mailbox_request_buff(mailcode, (void *)&ifc_b->mbuf);
+ if (ifc_b->mbuf) {
+ ret_val = mailbox_ifc_buf_pad(ifc_b, inlen);
+ }
+
+ if ((int)MAILBOX_FULL == ret_val) {
+ go_on =
+ (unsigned int)
+ mailbox_scene_delay(MAILBOX_DELAY_SCENE_IFC_FULL,
+ &try_times);
+ if (ifc_b->mbuf) {
+ mailbox_release_buff(ifc_b->mbuf);
+ ifc_b->mbuf = MAILBOX_NULL;
+ }
+ } else {
+ go_on = MAILBOX_FALSE;
+ }
+
+ } while (MAILBOX_TRUE == go_on);
+
+ if (MAILBOX_OK != ret_val) {
+ if (MAILBOX_NULL != ifc_b->mbuf) {
+ mailbox_release_buff(ifc_b->mbuf);
+ }
+ /*mailbox_show(mailcode,0); */
+ /*mailbox_assert(mailcode); */
+ return mailbox_logerro_p1(ret_val, mailcode);
+ }
+
+ return MAILBOX_OK;
+}
+
+void __ifc_arg_out(struct ifc_arg **p_ary, unsigned int db, unsigned long ab,
+ unsigned int sb, unsigned int af)
+{
+ struct ifc_arg *ary = *p_ary;
+ if ((IFC_OUT & db) || (IFC_BI & db)) {
+ ary->addr = ab;
+ if (IFC_FIX & db) {
+ ary->size = sb;
+ } else if (IFC_VAR & db) {
+ ary->size = af;
+ }
+ }
+ *p_ary = *p_ary + 1;
+}
+
+void __ifc_push_arg(unsigned long *in, unsigned int **p_av, unsigned int db,
+ unsigned long ab, unsigned int sb, unsigned int af)
+{
+
+ unsigned int *av = *p_av;
+ *av = ab;
+ if ((IFC_IN & db) || (IFC_BI & db)) {
+ if (IFC_CNT & db) {
+ *(unsigned int *)(*in) = (unsigned int)ab;
+ *in += ifc_align_size(sizeof(unsigned int), IFC_ALIGN);
+ } else if (IFC_FIX & db) {
+ mailbox_memcpy((void *)*in, (const void *)ab, sb);
+ *in += ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ mailbox_memcpy((void *)*in, (const void *)ab, af);
+ *in += ifc_align_size(af, IFC_ALIGN);
+ }
+ }
+ *p_av = *p_av + 1;
+}
+
+int __ifc_in_size(unsigned int db, unsigned int sb, unsigned int af)
+{
+ if ((IFC_IN & db) || (IFC_BI & db)) {
+ if (IFC_CNT & db) {
+ return ifc_align_size(sizeof(unsigned int), IFC_ALIGN);
+ } else if (IFC_FIX & db) {
+ return (int)ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ return (int)ifc_align_size(af, IFC_ALIGN);
+ }
+ }
+ return 0;
+}
+
+int __ifc_out_size(unsigned int db, unsigned int sb, unsigned int **pp_a)
+{
+ unsigned int size = 0;
+ if ((IFC_OUT & db) || (IFC_BI & db)) {
+ if (IFC_FIX & db) {
+ size = ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ unsigned int v_sz = *(unsigned int *)(*pp_a + 1);
+ size = ifc_align_size(v_sz, IFC_ALIGN);
+ }
+ }
+ *pp_a = *pp_a + 1;
+ return (int)size;
+}
+
+long __ifc_gen_arg(unsigned long *in, unsigned long *out, unsigned int db,
+ unsigned int sb, unsigned int **pp_a)
+{
+ unsigned long ret_val = 0;
+ unsigned int v_sz = 0;
+ if (IFC_VAR & db) {
+ v_sz = *(unsigned int *)(*pp_a + 1);
+ }
+
+ if (IFC_IN & db) {
+ if (IFC_CNT & db) {
+ ret_val = *(unsigned int *)*in;
+ *in = *in + sizeof(unsigned int);
+ } else if (IFC_FIX & db) {
+ ret_val = *in;
+ *in = *in + ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ ret_val = *in;
+ *in = *in + ifc_align_size(v_sz, IFC_ALIGN);
+ }
+ } else if (IFC_BI & db) {
+ if (IFC_FIX & db) {
+ ret_val = *in;
+ *in = *in + ifc_align_size(sb, IFC_ALIGN);
+ *out = *out + ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ ret_val = *in;
+ *in = *in + ifc_align_size(v_sz, IFC_ALIGN);
+ *out = *out + ifc_align_size(v_sz, IFC_ALIGN);
+ }
+ } else if (IFC_OUT & db) {
+ ret_val = *out;
+ if (IFC_FIX & db) {
+ *out = *out + ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ *out = *out + ifc_align_size(v_sz, IFC_ALIGN);
+ }
+ }
+ *pp_a = *pp_a + 1;
+ return (long)ret_val;
+}
+
+void __ifc_copy_arg(unsigned long *in, unsigned long *out, unsigned int db,
+ unsigned int sb, unsigned int **p_av, unsigned int **p_ao)
+{
+ unsigned int cpsize = 0;
+ unsigned int *av = *p_av;
+ unsigned int *ao = *p_ao;
+
+ *ao = 0;
+ if (IFC_FIX & db) {
+ cpsize = ifc_align_size(sb, IFC_ALIGN);
+ } else if (IFC_VAR & db) {
+ unsigned int v_sz = *(av + 1);
+ cpsize = ifc_align_size(v_sz, IFC_ALIGN);
+ } else if (IFC_CNT & db) {
+ cpsize = sizeof(unsigned int);
+ }
+
+ if (IFC_BI & db) {
+ mailbox_memcpy((void *)(*out), (const void *)(*in),
+ (unsigned int)cpsize);
+ *ao = (unsigned int)mailbox_virt_to_phy(*out);
+ *out += cpsize;
+ *in += cpsize;
+ } else if (IFC_IN & db) {
+ *in += cpsize;
+ } else if (IFC_OUT & db) {
+ *ao = (unsigned int)mailbox_virt_to_phy(*out);
+ *out += cpsize;
+ }
+
+ *p_av = *p_av + 1;
+ *p_ao = *p_ao + 1;
+}
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.h
new file mode 100644
index 000000000000..2cdd1f5ca8ba
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_ifc.h
@@ -0,0 +1,296 @@
+#ifndef __DRV_MAILBOX_IFC_H__
+#define __DRV_MAILBOX_IFC_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define IFC_IN 0x1
+#define IFC_OUT 0x2
+#define IFC_BI 0x4
+
+#define IFC_CNT (0x1 << 8)
+#define IFC_FIX (0x2 << 8)
+#define IFC_VAR (0x4 << 8)
+
+#define IFC_INCNT (IFC_IN | IFC_CNT)
+#define IFC_INFIX (IFC_IN | IFC_FIX)
+#define IFC_OUTFIX (IFC_OUT | IFC_FIX)
+#define IFC_BIFIX (IFC_BI | IFC_FIX)
+#define IFC_INVAR (IFC_IN | IFC_VAR)
+#define IFC_OUTVAR (IFC_OUT | IFC_VAR)
+#define IFC_BIVAR (IFC_BI | IFC_VAR)
+
+#define IFC_MAX_ARG (6)
+
+#define IFC_LAUNCH 0
+#define IFC_RESPONSE 1
+
+#define IFC_INLINE static __inline
+
+#define IFC_WAIT_FOREVER (0xffffffff)
+
+#ifdef _DRV_LLT_
+#define IFC_DEBUG_PARAM
+#endif
+
+#ifdef IFC_DEBUG_PARAM
+#define _ifc_chk_arg(ph, db, sb) __ifc_chk_arg(ph, db, sb)
+#define _ifc_dbg_arg(ph, db, sb, df) __ifc_dbg_arg(ph, db, sb, df)
+#else
+#define _ifc_chk_arg(ph, db, sb)
+#define _ifc_dbg_arg(ph, db, sb, df)
+#endif
+
+#define IFC_ALIGN sizeof(unsigned int)
+#define ifc_align_size(a, p) (((a)+((p)-1)) & ~((p)-1))
+
+#ifdef _DRV_LLT_
+#define C_CALL_ARG_R2L
+#endif
+
+ struct ifc_param {
+ unsigned int type;
+ unsigned int size;
+ };
+
+ struct ifc_head {
+ unsigned int scode;
+ unsigned int rcode;
+ unsigned int stamp;
+ unsigned int retval;
+ unsigned int needret;
+ unsigned int argv[IFC_MAX_ARG];
+ unsigned int data;
+ unsigned int data_phy_addr;
+
+#ifdef IFC_DEBUG_PARAM
+ struct ifc_param param_tbl[IFC_MAX_ARG];
+ signed int param_prob;
+#endif
+ };
+
+ struct ifc_arg {
+ unsigned int addr;
+ unsigned int size;
+ };
+
+ struct ifc_mb {
+ void *mbuf;
+ struct ifc_arg argo[IFC_MAX_ARG];
+ struct ifc_head head;
+ void *waitsem;
+ unsigned int total_size;
+ unsigned int data_size;
+ unsigned long data_vitrual_addr;
+ };
+
+#ifdef C_CALL_ARG_R2L
+#define _IFC_ADD_ARG0(ary, p)
+#define _IFC_ADD_ARG1(ary, d1, t1, a1, s1) __ifc_arg_out(ary, d1, (unsigned int)a1, s1, 0)
+#define _IFC_ADD_ARG2(ary, d2, t2, a2, s2, d1, t1, a1, s1, ...) _IFC_ADD_ARG1(ary, d1, t1, a1, s1); __ifc_arg_out(ary, d2, (unsigned int)a2, s2, (unsigned int)a1)
+#define _IFC_ADD_ARG3(ary, d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_ADD_ARG2(ary, d2, t2, a2, s2, __VA_ARGS__); __ifc_arg_out(ary, d3, (unsigned int)a3, s3, (unsigned int)a2)
+#define _IFC_ADD_ARG4(ary, d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_ADD_ARG3(ary, d3, t3, a3, s3, __VA_ARGS__); __ifc_arg_out(ary, d4, (unsigned int)a4, s4, (unsigned int)a3)
+#define _IFC_ADD_ARG5(ary, d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_ADD_ARG4(ary, d4, t4, a4, s4, __VA_ARGS__); __ifc_arg_out(ary, d5, (unsigned int)a5, s5, (unsigned int)a4)
+#define _IFC_ADD_ARG6(ary, d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_ADD_ARG5(ary, d5, t5, a5, s5, __VA_ARGS__); __ifc_arg_out(ary, d6, (unsigned int)a6, s6, (unsigned int)a5)
+
+#define _IFC_PUSHDATA0(d, av, p)
+#define _IFC_PUSHDATA1(d, av, d1, t1, a1, s1) __ifc_push_arg(d, av, d1, (unsigned int)a1, s1, 0)
+#define _IFC_PUSHDATA2(d, av, d2, t2, a2, s2, d1, t1, a1, s1) _IFC_PUSHDATA1(d, av, d1, t1, a1, s1); __ifc_push_arg(d, av, d2, (unsigned int)a2, s2, (unsigned int)a1)
+#define _IFC_PUSHDATA3(d, av, d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_PUSHDATA2(d, av, d2, t2, a2, s2, __VA_ARGS__); __ifc_push_arg(d, av, d3, (unsigned int)a3, s3, (unsigned int)a2)
+#define _IFC_PUSHDATA4(d, av, d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_PUSHDATA3(d, av, d3, t3, a3, s3, __VA_ARGS__); __ifc_push_arg(d, av, d4, (unsigned int)a4, s4, (unsigned int)a3)
+#define _IFC_PUSHDATA5(d, av, d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_PUSHDATA4(d, av, d4, t4, a4, s4, __VA_ARGS__); __ifc_push_arg(d, av, d5, (unsigned int)a5, s5, (unsigned int)a4)
+#define _IFC_PUSHDATA6(d, av, d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_PUSHDATA5(d, av, d5, t5, a5, s5, __VA_ARGS__); __ifc_push_arg(d, av, d6, (unsigned int)a6, s6, (unsigned int)a5)
+
+#define _IFC_PCOPY0(in, out, av, ao, p)
+#define _IFC_PCOPY1(in, out, av, ao, d1, t1, a1, s1) __ifc_copy_arg(in, out, d1, s1, av, ao)
+#define _IFC_PCOPY2(in, out, av, ao, d2, t2, a2, s2, d1, t1, a1, s1) _IFC_PCOPY1(in, out, av, ao, d1, t1, a1, s1); __ifc_copy_arg(in, out, d2, s2, av, ao)
+#define _IFC_PCOPY3(in, out, av, ao, d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_PCOPY2(in, out, av, ao, d2, t2, a2, s2, __VA_ARGS__); __ifc_copy_arg(in, out, d3, s3, av, ao)
+#define _IFC_PCOPY4(in, out, av, ao, d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_PCOPY3(in, out, av, ao, d3, t3, a3, s3, __VA_ARGS__); __ifc_copy_arg(in, out, d4, s4, av, ao)
+#define _IFC_PCOPY5(in, out, av, ao, d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_PCOPY4(in, out, av, ao, d4, t4, a4, s4, __VA_ARGS__); __ifc_copy_arg(in, out, d5, s5, av, ao)
+#define _IFC_PCOPY6(in, out, av, ao, d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_PCOPY5(in, out, av, ao, d5, t5, a5, s5, __VA_ARGS__); __ifc_copy_arg(in, out, d6, s6, av, ao)
+
+#define _IFC_OUTSIZE0(av, p) 0
+#define _IFC_OUTSIZE1(av, d1, t1, a1, s1) __ifc_out_size(d1, s1, av)
+#define _IFC_OUTSIZE2(av, d2, t2, a2, s2, d1, t1, a1, s1) _IFC_OUTSIZE1(av, d1, t1, a1, s1) + __ifc_out_size(d2, s2, av)
+#define _IFC_OUTSIZE3(av, d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_OUTSIZE2(av, d2, t2, a2, s2, __VA_ARGS__) + __ifc_out_size(d3, s3, av)
+#define _IFC_OUTSIZE4(av, d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_OUTSIZE3(av, d3, t3, a3, s3, __VA_ARGS__) + __ifc_out_size(d4, s4, av)
+#define _IFC_OUTSIZE5(av, d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_OUTSIZE4(av, d4, t4, a4, s4, __VA_ARGS__) + __ifc_out_size(d5, s5, av)
+#define _IFC_OUTSIZE6(av, d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_OUTSIZE5(av, d5, t5, a5, s5, __VA_ARGS__) + __ifc_out_size(d6, s6, av)
+#else
+#define _IFC_ADD_ARG0(ary, p)
+#define _IFC_ADD_ARG1(ary, d1, t1, a1, s1) __ifc_arg_out(ary, d1, (unsigned int)a1, s1, 0)
+#define _IFC_ADD_ARG2(ary, d2, t2, a2, s2, d1, t1, a1, s1, ...) __ifc_arg_out(ary, d2, (unsigned int)a2, s2, (unsigned int)a1); _IFC_ADD_ARG1(ary, d1, t1, a1, s1)
+#define _IFC_ADD_ARG3(ary, d3, t3, a3, s3, d2, t2, a2, s2, ...) __ifc_arg_out(ary, d3, (unsigned int)a3, s3, (unsigned int)a2); _IFC_ADD_ARG2(ary, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_ADD_ARG4(ary, d4, t4, a4, s4, d3, t3, a3, s3, ...) __ifc_arg_out(ary, d4, (unsigned int)a4, s4, (unsigned int)a3); _IFC_ADD_ARG3(ary, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_ADD_ARG5(ary, d5, t5, a5, s5, d4, t4, a4, s4, ...) __ifc_arg_out(ary, d5, (unsigned int)a5, s5, (unsigned int)a4); _IFC_ADD_ARG4(ary, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_ADD_ARG6(ary, d6, t6, a6, s6, d5, t5, a5, s5, ...) __ifc_arg_out(ary, d6, (unsigned int)a6, s6, (unsigned int)a5); _IFC_ADD_ARG5(ary, d5, t5, a5, s5, __VA_ARGS__)
+
+#define _IFC_PUSHDATA0(d, av, p)
+#define _IFC_PUSHDATA1(d, av, d1, t1, a1, s1) __ifc_push_arg(d, av, d1, (unsigned int)a1, s1, 0)
+#define _IFC_PUSHDATA2(d, av, d2, t2, a2, s2, d1, t1, a1, s1) __ifc_push_arg(d, av, d2, (unsigned int)a2, s2, (unsigned int)a1); _IFC_PUSHDATA1(d, av, d1, t1, a1, s1)
+#define _IFC_PUSHDATA3(d, av, d3, t3, a3, s3, d2, t2, a2, s2, ...) __ifc_push_arg(d, av, d3, (unsigned int)a3, s3, (unsigned int)a2); _IFC_PUSHDATA2(d, av, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_PUSHDATA4(d, av, d4, t4, a4, s4, d3, t3, a3, s3, ...) __ifc_push_arg(d, av, d4, (unsigned int)a4, s4, (unsigned int)a3); _IFC_PUSHDATA3(d, av, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_PUSHDATA5(d, av, d5, t5, a5, s5, d4, t4, a4, s4, ...) __ifc_push_arg(d, av, d5, (unsigned int)a5, s5, (unsigned int)a4); _IFC_PUSHDATA4(d, av, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_PUSHDATA6(d, av, d6, t6, a6, s6, d5, t5, a5, s5, ...) __ifc_push_arg(d, av, d6, (unsigned int)a6, s6, (unsigned int)a5); _IFC_PUSHDATA5(d, av, d5, t5, a5, s5, __VA_ARGS__)
+
+#define _IFC_PCOPY0(in, out, av, ao, p)
+#define _IFC_PCOPY1(in, out, av, ao, d1, t1, a1, s1) __ifc_copy_arg(in, out, d1, s1, av, ao)
+#define _IFC_PCOPY2(in, out, av, ao, d2, t2, a2, s2, d1, t1, a1, s1) __ifc_copy_arg(in, out, d2, s2, av, ao); _IFC_PCOPY1(in, out, av, ao, d1, t1, a1, s1)
+#define _IFC_PCOPY3(in, out, av, ao, d3, t3, a3, s3, d2, t2, a2, s2, ...) __ifc_copy_arg(in, out, d3, s3, av, ao); _IFC_PCOPY2(in, out, av, ao, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_PCOPY4(in, out, av, ao, d4, t4, a4, s4, d3, t3, a3, s3, ...) __ifc_copy_arg(in, out, d4, s4, av, ao); _IFC_PCOPY3(in, out, av, ao, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_PCOPY5(in, out, av, ao, d5, t5, a5, s5, d4, t4, a4, s4, ...) __ifc_copy_arg(in, out, d5, s5, av, ao); _IFC_PCOPY4(in, out, av, ao, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_PCOPY6(in, out, av, ao, d6, t6, a6, s6, d5, t5, a5, s5, ...) __ifc_copy_arg(in, out, d6, s6, av, ao); _IFC_PCOPY5(in, out, av, ao, d5, t5, a5, s5, __VA_ARGS__)
+
+#define _IFC_OUTSIZE0(av, p) 0
+#define _IFC_OUTSIZE1(av, d1, t1, a1, s1) __ifc_out_size(d1, s1, av)
+#define _IFC_OUTSIZE2(av, d2, t2, a2, s2, d1, t1, a1, s1) __ifc_out_size(d2, s2, av) + _IFC_OUTSIZE1(av, d1, t1, a1, s1)
+#define _IFC_OUTSIZE3(av, d3, t3, a3, s3, d2, t2, a2, s2, ...) __ifc_out_size(d3, s3, av) + _IFC_OUTSIZE2(av, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_OUTSIZE4(av, d4, t4, a4, s4, d3, t3, a3, s3, ...) __ifc_out_size(d4, s4, av) + _IFC_OUTSIZE3(av, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_OUTSIZE5(av, d5, t5, a5, s5, d4, t4, a4, s4, ...) __ifc_out_size(d5, s5, av) + _IFC_OUTSIZE4(av, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_OUTSIZE6(av, d6, t6, a6, s6, d5, t5, a5, s5, ...) __ifc_out_size(d6, s6, av) + _IFC_OUTSIZE5(av, d5, t5, a5, s5, __VA_ARGS__)
+
+#endif
+
+#define _IFC_PARAM0(in, out, av, p)
+#define _IFC_PARAM1(in, out, av, d1, t1, a1, s1) (t1)__ifc_gen_arg(in, out, d1, s1, av)
+#define _IFC_PARAM2(in, out, av, d2, t2, a2, s2, d1, t1, a1, s1) (t2)__ifc_gen_arg(in, out, d2, s2, av), _IFC_PARAM1(in, out, av, d1, t1, a1, s1)
+#define _IFC_PARAM3(in, out, av, d3, t3, a3, s3, d2, t2, a2, s2, ...) (t3)__ifc_gen_arg(in, out, d3, s3, av), _IFC_PARAM2(in, out, av, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_PARAM4(in, out, av, d4, t4, a4, s4, d3, t3, a3, s3, ...) (t4)__ifc_gen_arg(in, out, d4, s4, av), _IFC_PARAM3(in, out, av, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_PARAM5(in, out, av, d5, t5, a5, s5, d4, t4, a4, s4, ...) (t5)__ifc_gen_arg(in, out, d5, s5, av), _IFC_PARAM4(in, out, av, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_PARAM6(in, out, av, d6, t6, a6, s6, d5, t5, a5, s5, ...) (t6)__ifc_gen_arg(in, out, d6, s6, av), _IFC_PARAM5(in, out, av, d5, t5, a5, s5, __VA_ARGS__)
+
+#define _IFC_DECL0(p) /*void */
+#define _IFC_DECL1(d1, t1, a1, s1) t1 a1,
+#define _IFC_DECL2(d2, t2, a2, s2, ...) t2 a2, _IFC_DECL1(__VA_ARGS__)
+#define _IFC_DECL3(d3, t3, a3, s3, ...) t3 a3, _IFC_DECL2(__VA_ARGS__)
+#define _IFC_DECL4(d4, t4, a4, s4, ...) t4 a4, _IFC_DECL3(__VA_ARGS__)
+#define _IFC_DECL5(d5, t5, a5, s5, ...) t5 a5, _IFC_DECL4(__VA_ARGS__)
+#define _IFC_DECL6(d6, t6, a6, s6, ...) t6 a6, _IFC_DECL5(__VA_ARGS__)
+
+#define _IFC_INSIZE0(p) 0
+#define _IFC_INSIZE1(d1, t1, a1, s1) __ifc_in_size(d1, s1, 0)
+#define _IFC_INSIZE2(d2, t2, a2, s2, d1, t1, a1, s1) _IFC_INSIZE1(d1, t1, a1, s1) + __ifc_in_size(d2, s2, (unsigned int)a1)
+#define _IFC_INSIZE3(d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_INSIZE2(d2, t2, a2, s2, __VA_ARGS__) + __ifc_in_size(d3, s3, (unsigned int)a2)
+#define _IFC_INSIZE4(d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_INSIZE3(d3, t3, a3, s3, __VA_ARGS__) + __ifc_in_size(d4, s4, (unsigned int)a3)
+#define _IFC_INSIZE5(d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_INSIZE4(d4, t4, a4, s4, __VA_ARGS__) + __ifc_in_size(d5, s5, (unsigned int)a4)
+#define _IFC_INSIZE6(d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_INSIZE5(d5, t5, a5, s5, __VA_ARGS__) + __ifc_in_size(d6, s6, (unsigned int)a5)
+
+#define _IFC_DBGPARAM0(p, a)
+#define _IFC_DBGPARAM1(ph, d1, t1, a1, s1) _ifc_dbg_arg(ph, d1, s1, 0)
+#define _IFC_DBGPARAM2(ph, d2, t2, a2, s2, d1, t1, a1, s1) _IFC_DBGPARAM1(ph, d1, t1, a1, s1) ; _ifc_dbg_arg(ph, d2, s2, d1)
+#define _IFC_DBGPARAM3(ph, d3, t3, a3, s3, d2, t2, a2, s2, ...) _IFC_DBGPARAM2(ph, d2, t2, a2, s2, __VA_ARGS__); _ifc_dbg_arg(ph, d3, s3, d2)
+#define _IFC_DBGPARAM4(ph, d4, t4, a4, s4, d3, t3, a3, s3, ...) _IFC_DBGPARAM3(ph, d3, t3, a3, s3, __VA_ARGS__); _ifc_dbg_arg(ph, d4, s4, d3)
+#define _IFC_DBGPARAM5(ph, d5, t5, a5, s5, d4, t4, a4, s4, ...) _IFC_DBGPARAM4(ph, d4, t4, a4, s4, __VA_ARGS__); _ifc_dbg_arg(ph, d5, s5, d4)
+#define _IFC_DBGPARAM6(ph, d6, t6, a6, s6, d5, t5, a5, s5, ...) _IFC_DBGPARAM5(ph, d5, t5, a5, s5, __VA_ARGS__); _ifc_dbg_arg(ph, d6, s6, d5)
+
+#define _IFC_CHKPARAM0(p, a)
+#define _IFC_CHKPARAM1(ph, d1, t1, a1, s1) _ifc_chk_arg(ph, d1, s1)
+#define _IFC_CHKPARAM2(ph, d2, t2, a2, s2, d1, t1, a1, s1) _ifc_chk_arg(ph, d2, s2); _IFC_CHKPARAM1(ph, d1, t1, a1, s1)
+#define _IFC_CHKPARAM3(ph, d3, t3, a3, s3, d2, t2, a2, s2, ...) _ifc_chk_arg(ph, d3, s3); _IFC_CHKPARAM2(ph, d2, t2, a2, s2, __VA_ARGS__)
+#define _IFC_CHKPARAM4(ph, d4, t4, a4, s4, d3, t3, a3, s3, ...) _ifc_chk_arg(ph, d4, s4); _IFC_CHKPARAM3(ph, d3, t3, a3, s3, __VA_ARGS__)
+#define _IFC_CHKPARAM5(ph, d5, t5, a5, s5, d4, t4, a4, s4, ...) _ifc_chk_arg(ph, d5, s5); _IFC_CHKPARAM4(ph, d4, t4, a4, s4, __VA_ARGS__)
+#define _IFC_CHKPARAM6(ph, d6, t6, a6, s6, d5, t5, a5, s5, ...) _ifc_chk_arg(ph, d6, s6); _IFC_CHKPARAM5(ph, d5, t5, a5, s5, __VA_ARGS__)
+
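+/*
+ * _IFC_GEN_CALLx() expands to the caller-side stub of an IFC function
+ * with x parameters: it computes the marshalled input size, fills the
+ * input buffer and argv table, and either records the OUT/BI
+ * destinations and waits for the reply (timeout != 0) or sends without
+ * waiting.  __IFC_GEN_EXECx() below generates the matching callee-side
+ * dispatcher.
+ */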
+#define _IFC_GEN_CALLx(x, id, name, ...) \
+int name(_IFC_DECL##x(__VA_ARGS__) unsigned int timeout) \
+{ \
+ struct ifc_mb ifc_b; \
+ struct ifc_arg *argo = ifc_b.argo; \
+ unsigned int in_buf; \
+ unsigned int *argv = ifc_b.head.argv; \
+ unsigned int in_size = (_IFC_INSIZE##x(__VA_ARGS__)); \
+ argo = argo; argv = argv; \
+ if (x > IFC_MAX_ARG) return 0; \
+ if (mailbox_ifc_init_buf(&ifc_b, id, IFC_LAUNCH, in_size, timeout)) return 0; \
+ in_buf = ifc_b.head.data; \
+ _IFC_DBGPARAM##x(&ifc_b.head, __VA_ARGS__); \
+ _IFC_PUSHDATA##x(&in_buf, &argv, __VA_ARGS__); \
+ if (timeout) { \
+ _IFC_ADD_ARG##x(&argo, __VA_ARGS__); \
+ return mailbox_ifc_send_wait(&ifc_b, timeout); \
+ } \
+ else \
+ return mailbox_ifc_send_no_wait(&ifc_b); \
+}
+
+#define IFC_GEN_EXEC_NAME(name) ife_##name
+
+#define __IFC_GEN_EXECx(x, name, ...) \
+int IFC_GEN_EXEC_NAME(name)(struct ifc_head *ifc_h) \
+{ \
+ struct ifc_mb ifc_b; \
+ unsigned int in_b, out_b; \
+ unsigned int *argv = ifc_h->argv; \
+ unsigned int *argo; \
+ unsigned int out_size = (_IFC_OUTSIZE##x(&argv, __VA_ARGS__)); \
+ if (mailbox_ifc_init_buf(&ifc_b, ifc_h->rcode, IFC_RESPONSE, out_size, 0)) return MAILBOX_ERRO; \
+ in_b = ifc_h->data; out_b = ifc_b.head.data; argv = ifc_h->argv; \
+ _IFC_CHKPARAM##x(ifc_h, __VA_ARGS__); \
+ ifc_b.head.retval = (unsigned int)name(_IFC_PARAM##x(&in_b, &out_b, &argv, __VA_ARGS__)); \
+ if (ifc_h->needret) { \
+ in_b = ifc_h->data; out_b = ifc_b.head.data; ifc_b.head.stamp = ifc_h->stamp; \
+ argv = ifc_h->argv; argo = ifc_b.head.argv; \
+ _IFC_PCOPY##x(&in_b, &out_b, &argv, &argo, __VA_ARGS__); \
+ return mailbox_ifc_send_no_wait(&ifc_b); \
+ } \
+ else \
+ return mailbox_ifc_discard_buf(&ifc_b); \
+}
+
+ extern int mailbox_ifc_init_buf(struct ifc_mb *ifc_b,
+ unsigned int mailcode,
+ unsigned int direct,
+ unsigned int inlen,
+ unsigned int timeout);
+
+ extern int mailbox_ifc_send_no_wait(struct ifc_mb *ifc_b);
+
+ extern int mailbox_ifc_wait_resp(struct ifc_mb *ifc_b);
+
+ extern int mailbox_ifc_discard_buf(struct ifc_mb *ifc_b);
+
+ extern int mailbox_ifc_send_wait(struct ifc_mb *ifc_b,
+ unsigned int timeout);
+
+ extern int mailbox_ifc_register_exec(unsigned int mailcode,
+ int (*ife_cb) (struct ifc_head *
+ ifc_h)
+ );
+
+ extern void __ifc_arg_out(struct ifc_arg **ary, unsigned int db,
+ unsigned long ab, unsigned int sb,
+ unsigned int af);
+
+ extern void __ifc_push_arg(unsigned long *in, unsigned int **av,
+ unsigned int db, unsigned long ab,
+ unsigned int sb, unsigned int af);
+
+ extern int __ifc_in_size(unsigned int db, unsigned int sb,
+ unsigned int af);
+
+ extern void __ifc_dbg_arg(struct ifc_head *head, unsigned int db,
+ unsigned int sb, unsigned int df);
+
+ extern int __ifc_out_size(unsigned int db, unsigned int sb,
+ unsigned int **pp_af);
+
+ extern long __ifc_gen_arg(unsigned long *in, unsigned long *out,
+ unsigned int db, unsigned int sb,
+ unsigned int **pp_af);
+
+ extern void __ifc_copy_arg(unsigned long *in, unsigned long *out,
+ unsigned int db, unsigned int sb,
+ unsigned int **av, unsigned int **ao);
+
+ extern void __ifc_chk_arg(struct ifc_head *head, unsigned int db,
+ unsigned int sb);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /*end of __DRV_MAILBOX_IFC_H__ */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.c
new file mode 100644
index 000000000000..0c2e0fbdc446
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.c
@@ -0,0 +1,147 @@
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_debug.h"
+#include "drv_mailbox_gut.h"
+
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "msg"
+int BSP_CPU_StateGet(int CpuID)
+{
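+ /* stub: always report the remote CPU as ready */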
+ return 1;
+}
+
+void mailbox_msg_receiver(void *mb_buf, void *handle, void *data)
+{
+ struct mb_queue *queue;
+ struct mb_buff *mbuf = (struct mb_buff *)mb_buf;
+ mb_msg_cb func = (mb_msg_cb) handle;
+
+ queue = &mbuf->usr_queue;
+ if (func) {
+ func(data, (void *)queue, queue->size);
+ } else {
+ (void)mailbox_logerro_p1(MAILBOX_ERR_GUT_READ_CALLBACK_NOT_FIND,
+ mbuf->mailcode);
+ }
+}
+
+MAILBOX_EXTERN unsigned int mailbox_reg_msg_cb(unsigned int mailcode,
+ mb_msg_cb func, void *data)
+{
+ return (unsigned int)mailbox_register_cb(mailcode, mailbox_msg_receiver,
+ func, data);
+}
+
+MAILBOX_EXTERN unsigned int mailbox_try_send_msg(unsigned int mailcode,
+ void *pdata,
+ unsigned int length)
+{
+ struct mb_buff *mb_buf = MAILBOX_NULL;
+ struct mb_queue *queue = MAILBOX_NULL;
+ int ret_val = MAILBOX_OK;
+
+ if ((0 == pdata) || (0 == length)) {
+ ret_val = mailbox_logerro_p1(MAILBOX_ERRO, mailcode);
+ goto exit_out;
+
+ }
+ ret_val = mailbox_request_buff(mailcode, (void *)&mb_buf);
+ if (MAILBOX_OK != ret_val) {
+ goto exit_out;
+ }
+
+ queue = &mb_buf->usr_queue;
+ if (length != (unsigned int)mailbox_write_buff(queue, pdata, length)) {
+ ret_val = mailbox_logerro_p1(MAILBOX_FULL, mailcode);
+ goto exit_out;
+ }
+
+ ret_val = mailbox_sealup_buff(mb_buf, length);
+ if (MAILBOX_OK == ret_val) {
+ ret_val = mailbox_send_buff(mb_buf);
+ }
+
+ exit_out:
+ if (MAILBOX_NULL != mb_buf) {
+ mailbox_release_buff(mb_buf);
+ }
+
+ return (unsigned int)ret_val;
+}
+
+MAILBOX_GLOBAL unsigned int mailbox_read_msg_data(void *mail_handle,
+ char *buff,
+ unsigned int *size)
+{
+ struct mb_queue *pMailQueue = (struct mb_queue *)mail_handle;
+
+ if ((MAILBOX_NULL == pMailQueue) || (MAILBOX_NULL == buff)
+ || (MAILBOX_NULL == size)) {
+ return (unsigned int)
+ mailbox_logerro_p1(MAILBOX_ERR_GUT_INPUT_PARAMETER, 0);
+ }
+
+ if (pMailQueue->size > *size) {
+ return (unsigned int)
+ mailbox_logerro_p1
+ (MAILBOX_ERR_GUT_USER_BUFFER_SIZE_TOO_SMALL, *size);
+ }
+
+ if ((0 == pMailQueue->length) ||
+ ((unsigned int)(pMailQueue->front - pMailQueue->base) >
+ pMailQueue->length)
+ || ((unsigned int)(pMailQueue->rear - pMailQueue->base) >
+ pMailQueue->length)) {
+ return (unsigned int)
+ mailbox_logerro_p1
+ (MAILBOX_CRIT_GUT_INVALID_USER_MAIL_HANDLE, pMailQueue);
+ }
+
+ *size =
+ (unsigned int)mailbox_read_buff(pMailQueue, buff, pMailQueue->size);
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN unsigned int mailbox_send_msg(unsigned int mailcode,
+ void *data, unsigned int length)
+{
+ int ret_val = MAILBOX_OK;
+ unsigned int try_go_on = MAILBOX_TRUE;
+ int try_times = 0;
+
+ ret_val = BSP_CPU_StateGet(mailbox_get_dst_id(mailcode));
+ if (!ret_val) {
+ return MAILBOX_TARGET_NOT_READY;
+ }
+
+ ret_val = (int)mailbox_try_send_msg(mailcode, data, length);
+
+ if (MAILBOX_FALSE == mailbox_int_context()) {
+ while ((int)MAILBOX_FULL == ret_val) {
+ mailbox_delivery(mailbox_get_channel_id(mailcode));
+ try_go_on =
+ (unsigned int)
+ mailbox_scene_delay(MAILBOX_DELAY_SCENE_MSG_FULL,
+ &try_times);
+
+ if (MAILBOX_TRUE == try_go_on) {
+ ret_val =
+ (int)mailbox_try_send_msg(mailcode, data,
+ length);
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (MAILBOX_OK != ret_val) {
+ /*mailbox_show(mailcode,0); */
+ /*mailbox_assert(ret_val); */
+ if ((int)MAILBOX_FULL != ret_val) {
+ ret_val = (int)MAILBOX_ERRO;
+ }
+ return (unsigned int)ret_val;
+ }
+
+ return (unsigned int)ret_val;
+}
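+
+/*
+ * Illustrative usage (not part of the driver): a client registers a receive
+ * callback with mailbox_reg_msg_cb() and sends with mailbox_send_msg(); in
+ * the callback, the payload is read out with mailbox_read_msg_data() using
+ * the mail handle passed to the callback. For example (mailcode, priv,
+ * payload and payload_len are placeholders):
+ *
+ *   mailbox_reg_msg_cb(mailcode, my_receive_cb, priv);
+ *   mailbox_send_msg(mailcode, payload, payload_len);
+ */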
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.h
new file mode 100644
index 000000000000..88a938d39a53
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_msg.h
@@ -0,0 +1,48 @@
+#ifndef __DRV_MAILBOX_MSG_H__
+#define __DRV_MAILBOX_MSG_H__
+
+#include "drv_mailbox_cfg.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define IFC_GEN_CALL0(id, name) _IFC_GEN_CALLx(0, id, name)
+#define IFC_GEN_CALL1(id, name, ...) _IFC_GEN_CALLx(1, id, name, __VA_ARGS__)
+#define IFC_GEN_CALL2(id, name, ...) _IFC_GEN_CALLx(2, id, name, __VA_ARGS__)
+#define IFC_GEN_CALL3(id, name, ...) _IFC_GEN_CALLx(3, id, name, __VA_ARGS__)
+#define IFC_GEN_CALL4(id, name, ...) _IFC_GEN_CALLx(4, id, name, __VA_ARGS__)
+#define IFC_GEN_CALL5(id, name, ...) _IFC_GEN_CALLx(5, id, name, __VA_ARGS__)
+#define IFC_GEN_CALL6(id, name, ...) _IFC_GEN_CALLx(6, id, name, __VA_ARGS__)
+
+#define IFC_GEN_EXEC0(name) __IFC_GEN_EXECx(0, name)
+#define IFC_GEN_EXEC1(name, ...) __IFC_GEN_EXECx(1, name, __VA_ARGS__)
+#define IFC_GEN_EXEC2(name, ...) __IFC_GEN_EXECx(2, name, __VA_ARGS__)
+#define IFC_GEN_EXEC3(name, ...) __IFC_GEN_EXECx(3, name, __VA_ARGS__)
+#define IFC_GEN_EXEC4(name, ...) __IFC_GEN_EXECx(4, name, __VA_ARGS__)
+#define IFC_GEN_EXEC5(name, ...) __IFC_GEN_EXECx(5, name, __VA_ARGS__)
+#define IFC_GEN_EXEC6(name, ...) __IFC_GEN_EXECx(6, name, __VA_ARGS__)
+
+#define mailbox_ifc_register(id, name) mailbox_ifc_register_exec(id, IFC_GEN_EXEC_NAME(name))
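+
+/*
+ * Illustrative pairing (the argument descriptor lists come from the _IFC_*
+ * helpers in drv_mailbox_ifc.h and are omitted here): IFC_GEN_CALLn(id,
+ * name, ...) expands to a caller-side stub named "name" that marshals its
+ * arguments and sends them on mailcode "id", while IFC_GEN_EXECn(name, ...)
+ * expands to the remote-side wrapper ife_name(), which is hooked up with
+ * mailbox_ifc_register(id, name).
+ */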
+
+ int BSP_CPU_StateGet(int CpuID);
+
+ unsigned long mailbox_init(void);
+
+ unsigned int mailbox_send_msg(unsigned int mailcode,
+ void *data, unsigned int length);
+
+ unsigned int mailbox_reg_msg_cb(unsigned int mailcode,
+ mb_msg_cb func, void *data);
+
+ unsigned int mailbox_read_msg_data(void *MailHandle,
+ char *pData, unsigned int *pSize);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* end of drv_mailbox_msg.h */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_platform.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_platform.h
new file mode 100644
index 000000000000..dc2c44661b71
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_platform.h
@@ -0,0 +1,118 @@
+#ifndef _DRV_MAILBOX_PLATFORM_H_
+#define _DRV_MAILBOX_PLATFORM_H_
+
+#if defined(BSP_CORE_MODEM)
+#include "drv_mailbox_port_vxworks.h"
+
+#elif defined (BSP_CORE_APP)
+#include "drv_mailbox_port_linux.h"
+
+#elif defined (BSP_CORE_CM3)
+#include "drv_mailbox_port_mcu.h"
+
+#endif
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#ifndef MAILBOX_TRUE
+#define MAILBOX_TRUE (1)
+#endif
+
+#ifndef MAILBOX_FALSE
+#define MAILBOX_FALSE (0)
+#endif
+
+#ifndef MAILBOX_NULL
+#define MAILBOX_NULL (void *)(0)
+#endif
+
+#ifdef _DRV_LLT_
+#define MAILBOX_LOCAL
+#else
+#ifndef MAILBOX_LOCAL
+#define MAILBOX_LOCAL static
+#endif
+#endif
+
+#ifndef MAILBOX_EXTERN
+#define MAILBOX_EXTERN
+#endif
+
+#ifndef MAILBOX_GLOBAL
+#define MAILBOX_GLOBAL
+#endif
+
+#define MIALBOX_DIRECTION_INVALID (0)
+#define MIALBOX_DIRECTION_SEND (1)
+#define MIALBOX_DIRECTION_RECEIVE (2)
+
+#define MAILBOX_MAILCODE_INVALID (unsigned long)(0xffffffff)
+
+ enum MAILBOX_DELAY_SCENE_E {
+ MAILBOX_DELAY_SCENE_MSG_FULL,
+ MAILBOX_DELAY_SCENE_IFC_FULL
+ };
+
+ extern MAILBOX_EXTERN void mailbox_assert(unsigned int ErroNo);
+
+ extern MAILBOX_EXTERN int mailbox_mutex_lock(void **mutexId);
+
+ extern MAILBOX_EXTERN void mailbox_mutex_unlock(void **mutexId);
+
+ extern MAILBOX_EXTERN int mailbox_int_context(void);
+
+ extern MAILBOX_EXTERN int mailbox_get_timestamp(void);
+
+ extern MAILBOX_EXTERN int mailbox_process_register(unsigned int
+ channel_id,
+ int (*cb) (unsigned
+ int
+ channel_id),
+ void *priv);
+
+ extern MAILBOX_EXTERN int mailbox_channel_register(unsigned int
+ ChannelID,
+ unsigned int IntNum,
+ unsigned int DstID,
+ unsigned int Direct,
+ void **mutex);
+
+ extern MAILBOX_EXTERN void *mailbox_memcpy(void *dst, const void *src,
+ unsigned int size);
+
+ extern MAILBOX_EXTERN void *mailbox_memset(void *m, int c,
+ unsigned int size);
+
+ extern MAILBOX_EXTERN int mailbox_delivery(unsigned int channel_id);
+
+ extern MAILBOX_EXTERN int mailbox_init_platform(void);
+
+ extern MAILBOX_EXTERN int mailbox_scene_delay(unsigned int scene_id,
+ int *try_times);
+
+ extern MAILBOX_EXTERN void *mailbox_init_completion(void);
+
+ extern MAILBOX_EXTERN int mailbox_wait_completion(void **mutexId,
+ unsigned int timeout);
+
+ extern MAILBOX_EXTERN void mailbox_complete(void **wait_id);
+
+ extern MAILBOX_EXTERN void mailbox_del_completion(void **wait);
+ extern void mailbox_ifc_test_init(void);
+
+#if defined (BSP_CORE_CM3)
+ extern void mailbox_dpm_device_get(void);
+ extern void mailbox_dpm_device_put(void);
+#else
+#define mailbox_dpm_device_get()
+#define mailbox_dpm_device_put()
+#endif
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /*_DRV_MAILBOX_PLATFORM_H_*/
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.c
new file mode 100644
index 000000000000..3f92b337ef33
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.c
@@ -0,0 +1,630 @@
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_debug.h"
+#include "bsp_drv_ipc.h"
+
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "linux"
+#define MAILBOX_MILLISEC_PER_SECOND 1000
+
+#define MAILBOX_LINUX_SEND_FULL_DELAY_MS 10
+#define MAILBOX_LINUX_SEND_FULL_DELAY_TIMES 0
+ enum MAILBOX_LOCK_TYPE {
+ MAILBOX_LOCK_SEMAPHORE = 0x00010000,
+ MAILBOX_LOCK_SPINLOCK = 0x00020000
+};
+#define MAILBOX_PROC_MASK 0x0000ffff
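+
+/*
+ * The "property" field of struct mb_local_cfg packs a processing style
+ * (enum MAILBOX_LINUX_PROC_STYLE_E, low 16 bits selected by
+ * MAILBOX_PROC_MASK) together with an optional lock type from
+ * enum MAILBOX_LOCK_TYPE in the upper bits.
+ */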
+enum MAILBOX_LINUX_PROC_STYLE_E {
+ MAILBOX_SEND = 0,
+
+ MAILBOX_RECEV_START,
+
+ MAILBOX_RECV_TASK_START,
+ MAILBOX_RECV_TASK_NORMAL,
+ MAILBOX_RECV_TASK_HIGH,
+
+ MAILBOX_RECV_TASK_END,
+
+ MAILBOX_RECV_TASKLET,
+ MAILBOX_RECV_TASKLET_HI,
+
+ MAILBOX_RECV_INT_IRQ,
+ MAILBOX_RECV_END,
+};
+
+struct mb_local_work {
+ unsigned int channel_id;
+ unsigned int data_flag;
+ int (*cb) (unsigned int channel_id);
+ struct mb_local_work *next;
+ void *mb_priv;
+};
+
+struct mb_local_proc {
+ signed char proc_name[16];
+ unsigned int proc_id;
+ signed int priority;
+ struct mb_local_work *work_list;
+ wait_queue_head_t wait;
+ struct tasklet_struct tasklet;
+ int incoming;
+
+};
+
+struct mb_local_cfg {
+ unsigned int channel_id;
+ unsigned int property;
+ unsigned int int_src;
+ unsigned int dst_id;
+};
+
+struct mb_mutex {
+ void *lock;
+ unsigned long flags;
+ unsigned int type;
+};
+
+static struct wakeup_source mb_lpwr_lock;
+
+MAILBOX_LOCAL struct mb_local_proc g_mailbox_local_proc_tbl[] = {
+ {"mailboxNormal", MAILBOX_RECV_TASK_NORMAL, 86, 0,},
+ {"mailboxHigh", MAILBOX_RECV_TASK_HIGH, 99, 0,},
+
+ {"mailboxTasklet", MAILBOX_RECV_TASKLET, 0, 0,},
+ {"mailboxTasklet", MAILBOX_RECV_TASKLET_HI, 0, 0,},
+
+ {"mailboxInt", MAILBOX_RECV_INT_IRQ, 0, 0,},
+
+};
+
+MAILBOX_LOCAL struct mb_local_cfg g_mb_local_cfg_tbl[] = {
+ /* receive channels (to ACPU) */
+ {MAILBOX_MAILCODE_RESERVED(MCU, ACPU, MSG), MAILBOX_RECV_TASKLET_HI, 0},
+ {MAILBOX_MAILCODE_RESERVED(HIFI, ACPU, MSG), MAILBOX_RECV_TASKLET_HI,
+ 0},
+ {MAILBOX_MAILCODE_RESERVED(CCPU, ACPU, MSG), MAILBOX_RECV_TASKLET_HI,
+ 0},
+ {MAILBOX_MAILCODE_RESERVED(CCPU, ACPU, IFC), MAILBOX_RECV_TASK_NORMAL,
+ 0},
+ {MAILBOX_MAILCODE_RESERVED(MCU, ACPU, IFC), MAILBOX_RECV_TASK_HIGH, 0},
+
+ /* send channels (from ACPU) */
+ {MAILBOX_MAILCODE_RESERVED(ACPU, MCU, MSG),
+ MAILBOX_SEND | MAILBOX_LOCK_SPINLOCK, 0},
+ {MAILBOX_MAILCODE_RESERVED(ACPU, HIFI, MSG),
+ MAILBOX_SEND | MAILBOX_LOCK_SPINLOCK, 0},
+
+ {MAILBOX_MAILCODE_RESERVED(ACPU, CCPU, MSG), MAILBOX_SEND |
+ MAILBOX_LOCK_SEMAPHORE, 0},
+
+ {MAILBOX_MAILCODE_RESERVED(ACPU, CCPU, IFC),
+ MAILBOX_SEND | MAILBOX_LOCK_SEMAPHORE, 0},
+ {MAILBOX_MAILCODE_RESERVED(ACPU, MCU, IFC),
+ MAILBOX_SEND | MAILBOX_LOCK_SEMAPHORE, 0},
+
+ {MAILBOX_MAILCODE_INVALID, 0, 0}
+};
+
+MAILBOX_LOCAL void *mailbox_mutex_create(struct mb_local_cfg *local_cfg);
+MAILBOX_LOCAL void mailbox_receive_process(unsigned long data)
+{
+ struct mb_local_proc *proc = (struct mb_local_proc *)data;
+ struct mb_local_work *work = proc->work_list;
+
+ while (MAILBOX_NULL != work) {
+ if (MAILBOX_TRUE == work->data_flag) {
+ work->data_flag = MAILBOX_FALSE;
+ mailbox_record_sche_recv(work->mb_priv);
+ if (MAILBOX_NULL != work->cb) {
+ if (MAILBOX_OK != work->cb(work->channel_id)) {
+ mailbox_logerro_p1
+ (MAILBOX_ERR_LINUX_CALLBACK_ERRO,
+ work->channel_id);
+ }
+ } else {
+ mailbox_logerro_p1
+ (MAILBOX_ERR_LINUX_CALLBACK_NOT_FIND,
+ work->channel_id);
+ }
+ }
+ work = work->next;
+ }
+ __pm_relax(&mb_lpwr_lock);
+}
+
+MAILBOX_LOCAL int mailbox_receive_task(void *data)
+{
+ struct mb_local_proc *proc = (struct mb_local_proc *)data;
+ struct sched_param param;
+
+ param.sched_priority =
+ (proc->priority < MAX_RT_PRIO) ? proc->priority : (MAX_RT_PRIO - 1);
+ (void)sched_setscheduler(current, SCHED_FIFO, &param);
+
+ /* set_freezable(); */ /* left disabled as advised (l56193) */
+
+ do {
+ wait_event(proc->wait, proc->incoming);
+ proc->incoming = MAILBOX_FALSE;
+
+ mailbox_receive_process((unsigned long)data);
+
+ } while (!kthread_should_stop());
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN int mailbox_init_platform(void)
+{
+ struct mb_local_proc *local_proc = &g_mailbox_local_proc_tbl[0];
+ unsigned int count =
+ sizeof(g_mailbox_local_proc_tbl) / sizeof(struct mb_local_proc);
+ unsigned int proc_id;
+ struct task_struct *task = MAILBOX_NULL;
+
+ wakeup_source_init(&mb_lpwr_lock, "mailbox_low_power_wake_lock");
+
+ while (count) {
+ proc_id = local_proc->proc_id;
+ if ((proc_id > MAILBOX_RECV_TASK_START)
+ && (proc_id < MAILBOX_RECV_TASK_END)) {
+
+ init_waitqueue_head(&local_proc->wait);
+
+ task =
+ kthread_run(mailbox_receive_task,
+ (void *)local_proc,
+ local_proc->proc_name);
+ if (IS_ERR(task)) {
+ return
+ mailbox_logerro_p1
+ (MAILBOX_ERR_LINUX_TASK_CREATE, proc_id);
+ }
+ }
+
+ if ((MAILBOX_RECV_TASKLET == proc_id) ||
+ (MAILBOX_RECV_TASKLET_HI == proc_id)) {
+ tasklet_init(&local_proc->tasklet,
+ mailbox_receive_process,
+ (unsigned long)local_proc);
+
+ }
+ count--;
+ local_proc++;
+ }
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_LOCAL int mailbox_ipc_process(struct mb_local_work *local_work,
+ struct mb_local_proc *local_proc,
+ unsigned int channel_id,
+ unsigned int proc_id)
+{
+ unsigned int is_find = MAILBOX_TRUE;
+
+ while (local_work) {
+ if (channel_id == local_work->channel_id) {
+ local_work->data_flag = MAILBOX_TRUE;
+
+ mailbox_record_sche_send(local_work->mb_priv);
+ __pm_stay_awake(&mb_lpwr_lock);
+
+ if ((proc_id > MAILBOX_RECV_TASK_START)
+ && (proc_id < MAILBOX_RECV_TASK_END)) {
+
+ local_proc->incoming = MAILBOX_TRUE;
+ wake_up(&local_proc->wait);
+
+ } else if (MAILBOX_RECV_TASKLET_HI == proc_id) {
+ tasklet_hi_schedule(&local_proc->tasklet);
+
+ } else if (MAILBOX_RECV_TASKLET == proc_id) {
+ tasklet_schedule(&local_proc->tasklet);
+
+ } else if (MAILBOX_RECV_INT_IRQ == proc_id) {
+ mailbox_receive_process((unsigned long)
+ local_proc);
+
+ } else {
+ is_find = MAILBOX_FALSE;
+ }
+
+ }
+
+ local_work = local_work->next;
+ }
+
+ return is_find;
+}
+
+MAILBOX_LOCAL int mailbox_ipc_int_handle(unsigned int int_num)
+{
+ struct mb_local_cfg *local_cfg = &g_mb_local_cfg_tbl[0];
+ struct mb_local_proc *local_proc = MAILBOX_NULL;
+ struct mb_local_work *local_work = MAILBOX_NULL;
+ unsigned int count =
+ sizeof(g_mailbox_local_proc_tbl) / sizeof(struct mb_local_proc);
+ unsigned int proc_id = 0;
+ unsigned int channel_id = 0;
+ unsigned int is_find = MAILBOX_FALSE;
+ unsigned int ret_val = MAILBOX_OK;
+
+ while (MAILBOX_MAILCODE_INVALID != local_cfg->channel_id) {
+ proc_id = local_cfg->property;
+ if ((int_num == local_cfg->int_src) &&
+ (MAILBOX_SEND != (MAILBOX_PROC_MASK & local_cfg->property))) {
+ channel_id = local_cfg->channel_id;
+
+ local_proc = &g_mailbox_local_proc_tbl[0];
+ count =
+ sizeof(g_mailbox_local_proc_tbl) /
+ sizeof(struct mb_local_proc);
+ while (count) {
+ if (proc_id == local_proc->proc_id) {
+ local_work = local_proc->work_list;
+ is_find =
+ mailbox_ipc_process(local_work,
+ local_proc,
+ channel_id,
+ proc_id);
+ break;
+ }
+ count--;
+ local_proc++;
+ }
+
+ if (0 == count) {
+ ret_val =
+ mailbox_logerro_p1
+ (MAILBOX_ERR_LINUX_MAIL_TASK_NOT_FIND,
+ channel_id);
+ }
+ }
+ local_cfg++;
+ }
+
+ if (MAILBOX_TRUE != is_find) {
+ ret_val =
+ mailbox_logerro_p1(MAILBOX_ERR_LINUX_MAIL_INT_NOT_FIND,
+ channel_id);
+ }
+
+ return ret_val;
+}
+
+MAILBOX_EXTERN int mailbox_process_register(unsigned int channel_id,
+ int (*cb) (unsigned int channel_id),
+ void *priv)
+{
+ struct mb_local_work *local_work = MAILBOX_NULL;
+ struct mb_local_cfg *local_cfg = &g_mb_local_cfg_tbl[0];
+ struct mb_local_proc *local_proc = &g_mailbox_local_proc_tbl[0];
+ struct mb_local_cfg *find_cfg = MAILBOX_NULL;
+ unsigned int count =
+ sizeof(g_mailbox_local_proc_tbl) / sizeof(struct mb_local_proc);
+
+ while (MAILBOX_MAILCODE_INVALID != local_cfg->channel_id) {
+ if (local_cfg->channel_id == channel_id) {
+ find_cfg = local_cfg;
+ break;
+ }
+ local_cfg++;
+ }
+
+ if (find_cfg) {
+ while (count) {
+ if (find_cfg->property == local_proc->proc_id) {
+ if (local_proc->work_list) {
+ local_work = local_proc->work_list;
+ while (MAILBOX_NULL != local_work->next) {
+ local_work = local_work->next;
+ }
+ local_work->next =
+ (struct mb_local_work *)
+ kcalloc(sizeof
+ (struct mb_local_work), 1,
+ GFP_KERNEL);
+ if (NULL == local_work->next) {
+ (void)printk(KERN_ERR
+ "%s: memory alloc error!\n",
+ __func__);
+ return
+ MAILBOX_ERR_LINUX_ALLOC_MEMORY;
+ }
+ local_work->next->channel_id =
+ find_cfg->channel_id;
+ local_work->next->cb = cb;
+ local_work->next->next = MAILBOX_NULL;
+ local_work->next->mb_priv = priv;
+
+ } else {
+ local_proc->work_list =
+ (struct mb_local_work *)
+ kcalloc(sizeof
+ (struct mb_local_work), 1,
+ GFP_KERNEL);
+ if (NULL == local_proc->work_list) {
+ (void)printk(KERN_ERR
+ "%s: memory alloc error!\n",
+ __func__);
+ return
+ MAILBOX_ERR_LINUX_ALLOC_MEMORY;
+ }
+ local_proc->work_list->channel_id =
+ find_cfg->channel_id;
+ local_proc->work_list->cb = cb;
+ local_proc->work_list->next =
+ MAILBOX_NULL;
+ local_proc->work_list->mb_priv = priv;
+ }
+
+ }
+ count--;
+ local_proc++;
+ }
+
+ return MAILBOX_OK;
+ }
+
+ return mailbox_logerro_p1(MAILBOX_ERR_LINUX_CHANNEL_NOT_FIND,
+ channel_id);
+
+}
+
+MAILBOX_EXTERN int mailbox_channel_register(unsigned int channel_id,
+ unsigned int int_src,
+ unsigned int dst_id,
+ unsigned int direct, void **mutex)
+{
+ struct mb_local_cfg *local_cfg = &g_mb_local_cfg_tbl[0];
+ while (MAILBOX_MAILCODE_INVALID != local_cfg->channel_id) {
+ if (channel_id == local_cfg->channel_id) {
+ *mutex = mailbox_mutex_create(local_cfg);
+ if (MAILBOX_NULL == *mutex) {
+ return
+ mailbox_logerro_p1(MAILBOX_CRIT_PORT_CONFIG,
+ channel_id);
+ }
+
+ local_cfg->int_src = int_src;
+ local_cfg->dst_id = dst_id;
+
+ if (MIALBOX_DIRECTION_RECEIVE == direct) {
+ IPC_IntConnect((IPC_INT_LEV_E) int_src,
+ (VOIDFUNCPTR)
+ mailbox_ipc_int_handle, int_src);
+ IPC_IntEnable((IPC_INT_LEV_E) int_src);
+
+ /*test_mailbox_msg_reg(channel_id); */
+ }
+ break;
+ }
+
+ local_cfg++;
+ }
+
+ if (MAILBOX_MAILCODE_INVALID == local_cfg->channel_id) {
+ return mailbox_logerro_p1(MAILBOX_ERR_LINUX_CHANNEL_NOT_FIND,
+ channel_id);
+ }
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN int mailbox_delivery(unsigned int channel_id)
+{
+ struct mb_local_cfg *local_cfg = &g_mb_local_cfg_tbl[0];
+ struct mb_local_cfg *find_cfg = MAILBOX_NULL;
+
+ while (MAILBOX_MAILCODE_INVALID != local_cfg->channel_id) {
+ if (local_cfg->channel_id == channel_id) {
+ find_cfg = local_cfg;
+ break;
+ }
+ local_cfg++;
+ }
+
+ if (MAILBOX_NULL != find_cfg) {
+ return (int)IPC_IntSend((IPC_INT_CORE_E) find_cfg->dst_id,
+ (IPC_INT_LEV_E) find_cfg->int_src);
+ }
+
+ return mailbox_logerro_p1(MAILBOX_ERR_LINUX_CHANNEL_NOT_FIND,
+ channel_id);
+}
+
+MAILBOX_LOCAL void *mailbox_mutex_create(struct mb_local_cfg *local_cfg)
+{
+ unsigned int channel_id = local_cfg->channel_id;
+ struct mb_mutex *mtx = MAILBOX_NULL;
+
+ mtx = (struct mb_mutex *)kmalloc(sizeof(struct mb_mutex), GFP_KERNEL);
+ if (!mtx) {
+ mailbox_logerro_p1(MAILBOX_ERR_LINUX_CHANNEL_NOT_FIND,
+ channel_id);
+ goto error_exit;
+ }
+
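+ /*
+ * Receive channels (property between MAILBOX_RECEV_START and
+ * MAILBOX_RECV_END) always get a spinlock; send channels use the
+ * lock type encoded in the property flags.
+ */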
+ if ((local_cfg->property > MAILBOX_RECEV_START) &&
+ (local_cfg->property < MAILBOX_RECV_END)) {
+ mtx->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (mtx->lock) {
+ spin_lock_init((spinlock_t *) mtx->lock);
+ mtx->type = MAILBOX_LOCK_SPINLOCK;
+ } else {
+ mailbox_logerro_p1(MAILBOX_CRIT_PORT_CONFIG,
+ channel_id);
+ goto error_exit;
+ }
+ } else {
+ if (MAILBOX_LOCK_SEMAPHORE & local_cfg->property) {
+ mtx->lock =
+ kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (mtx->lock) {
+ sema_init(mtx->lock, 1);
+ mtx->type = MAILBOX_LOCK_SEMAPHORE;
+ } else {
+ mailbox_logerro_p1(MAILBOX_CRIT_PORT_CONFIG,
+ channel_id);
+ goto error_exit;
+ }
+ } else if (MAILBOX_LOCK_SPINLOCK & local_cfg->property) {
+ mtx->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (mtx->lock) {
+ spin_lock_init((spinlock_t *) mtx->lock);
+ mtx->type = MAILBOX_LOCK_SPINLOCK;
+ } else {
+ mailbox_logerro_p1(MAILBOX_CRIT_PORT_CONFIG,
+ channel_id);
+ goto error_exit;
+ }
+ } else {
+ mailbox_logerro_p1(MAILBOX_CRIT_PORT_CONFIG,
+ channel_id);
+ goto error_exit;
+ }
+ }
+ return mtx;
+
+ error_exit:
+ if (mtx)
+ kfree(mtx);
+
+ return (void *)0;
+}
+
+MAILBOX_EXTERN int mailbox_mutex_lock(void **mutexId)
+{
+ struct mb_mutex *mtx = (struct mb_mutex *)*mutexId;
+
+ if (MAILBOX_LOCK_SEMAPHORE == mtx->type) {
+ down((struct semaphore *)mtx->lock);
+ } else if (MAILBOX_LOCK_SPINLOCK == mtx->type) {
+ spin_lock_irqsave((spinlock_t *) mtx->lock, mtx->flags);
+ }
+
+ return MAILBOX_OK;
+}
+
+MAILBOX_EXTERN void mailbox_mutex_unlock(void **mutexId)
+{
+ struct mb_mutex *mtx = (struct mb_mutex *)*mutexId;
+
+ if (MAILBOX_LOCK_SEMAPHORE == mtx->type) {
+ up((struct semaphore *)mtx->lock);
+ } else if (MAILBOX_LOCK_SPINLOCK == mtx->type) {
+ spin_unlock_irqrestore((spinlock_t *) mtx->lock, mtx->flags);
+ }
+
+ return;
+}
+
+MAILBOX_EXTERN void *mailbox_init_completion(void)
+{
+ struct completion *wait;
+ wait =
+ (struct completion *)kmalloc(sizeof(struct completion), GFP_KERNEL);
+ if (!wait) {
+ return MAILBOX_NULL;
+ }
+ init_completion(wait);
+
+ return (void *)wait;
+}
+
+MAILBOX_EXTERN int mailbox_wait_completion(void **wait, unsigned int timeout)
+{
+ long wait_jiffies = msecs_to_jiffies(timeout); /* avoid shadowing the global jiffies */
+ long ret = wait_for_completion_timeout(*wait, wait_jiffies);
+
+ return (ret > 0) ? MAILBOX_OK : MAILBOX_ERRO;
+}
+
+MAILBOX_EXTERN void mailbox_complete(void **wait)
+{
+ complete(*wait);
+}
+
+MAILBOX_EXTERN void mailbox_del_completion(void **wait)
+{
+ kfree(*wait);
+}
+
+MAILBOX_EXTERN void *mailbox_memcpy(void *Destination, const void *Source,
+ unsigned int Size)
+{
+
+ return (void *)memcpy(Destination, Source, Size);
+}
+
+MAILBOX_EXTERN void *mailbox_memset(void *m, int c, unsigned int size)
+{
+
+ return memset(m, c, size);
+}
+
+MAILBOX_EXTERN void mailbox_assert(unsigned int ErroNo)
+{
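+ /* no-op in the Linux port */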
+
+}
+
+MAILBOX_EXTERN int mailbox_int_context(void)
+{
+ return in_interrupt();
+}
+
+MAILBOX_EXTERN int mailbox_scene_delay(unsigned int scene_id, int *try_times)
+{
+ unsigned int go_on = MAILBOX_FALSE;
+ unsigned int delay_ms = 0;
+
+ switch (scene_id) {
+ case MAILBOX_DELAY_SCENE_MSG_FULL:
+ case MAILBOX_DELAY_SCENE_IFC_FULL:
+
+ delay_ms = MAILBOX_LINUX_SEND_FULL_DELAY_MS;
+
+ go_on = (*try_times >= MAILBOX_LINUX_SEND_FULL_DELAY_TIMES) ?
+ MAILBOX_FALSE : MAILBOX_TRUE;
+
+ break;
+ default:
+
+ break;
+ }
+
+ if (MAILBOX_TRUE == go_on) {
+ msleep(delay_ms);
+ }
+
+ *try_times = *try_times + 1;
+ return go_on;
+}
+
+extern void *g_slice_reg;
+MAILBOX_EXTERN int mailbox_get_timestamp(void)
+{
+ if (g_slice_reg) {
+ return (long)readl(g_slice_reg);
+ }
+ return 0;
+
+}
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.h
new file mode 100644
index 000000000000..09709c24cc6a
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_port_linux.h
@@ -0,0 +1,65 @@
+#ifndef _DRV_MAILBOX_PORT_LINUX_H_
+#define _DRV_MAILBOX_PORT_LINUX_H_
+
+#include <linux/kernel.h>
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#if 0
+#ifndef _DRV_LLT_
+#include <mach/hardware.h>
+
+#define MEM_CORE_SHARE_PHY2VIRT(phy) (((unsigned int)phy) - IPC_SHARE_MEM_ADDR + IPC_SHARE_MEM_VIRT_ADDR)
+#define MEM_CORE_SHARE_VIRT2PHY(virt) (((unsigned int)virt) - IPC_SHARE_MEM_VIRT_ADDR + IPC_SHARE_MEM_ADDR)
+
+#else
+
+#define MEM_CORE_SHARE_PHY2VIRT(phy) (phy)
+#define MEM_CORE_SHARE_VIRT2PHY(virt) (virt)
+#endif
+#endif
+
+#define MEM_CORE_SHARE_PHY2VIRT(phy) (phy)
+#define MEM_CORE_SHARE_VIRT2PHY(virt) (virt)
+
+#define MAILBOX_LOCAL_CPUID MAILBOX_CPUID_ACPU
+
+#define MAILBOX_CHANNEL_NUM \
+(MAILBOX_CHANNEL_BUTT(ACPU, HIFI) \
++ MAILBOX_CHANNEL_BUTT(HIFI, ACPU) \
+)
+
+#define MAILBOX_USER_NUM \
+(MAILBOX_USER_BUTT(HIFI, ACPU, MSG) \
+)
+
+#ifdef _DRV_LLT_
+#define mailbox_out(p) (printk p)
+#else
+#define mailbox_out(p) (printk p)
+#endif
+
+#define _MAILBOX_LINE_ __LINE__
+
+#define _MAILBOX_FILE_ (void *)(0) /*__FILE__*/
+
+#define MAILBOX_LOG_LEVEL MAILBOX_LOG_ERROR
+
+#ifndef MAILBOX_OPEN_MNTN
+#define MAILBOX_OPEN_MNTN
+#endif
+
+#define MAILBOX_RECORD_USEID_NUM (64)
+
+#define RT "\n"
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* _DRV_MAILBOX_PORT_LINUX_H_ */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_stub.h b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_stub.h
new file mode 100644
index 000000000000..2d40f933d170
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_stub.h
@@ -0,0 +1,74 @@
+#ifndef __DRV_MAILBOX_STUB_H__
+#define __DRV_MAILBOX_STUB_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+#define MAILBOX_MEM_BASEADDR ((int)&g_MAILBOX_BST_HEAD0[0])
+
+#define MAILBOX_MEM_LENGTH mailbox_memory_length()
+
+ extern int g_MAILBOX_BST_HEAD0[];
+#define MAILBOX_HEAD_ADDR_MCU2ACPU_MSG MAILBOX_MEM_BASEADDR
+#define MAILBOX_HEAD_ADDR_ACPU2MCU_MSG (MAILBOX_HEAD_ADDR_MCU2ACPU_MSG + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_ACPU2HIFI_MSG (MAILBOX_HEAD_ADDR_ACPU2MCU_MSG + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_HIFI2ACPU_MSG (MAILBOX_HEAD_ADDR_ACPU2HIFI_MSG + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_MCU2CCPU_MSG (MAILBOX_HEAD_ADDR_HIFI2ACPU_MSG + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_CCPU2MCU_MSG (MAILBOX_HEAD_ADDR_MCU2CCPU_MSG + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_CCPU2HIFI_MSG (MAILBOX_HEAD_ADDR_CCPU2MCU_MSG + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_HIFI2CCPU_MSG (MAILBOX_HEAD_ADDR_CCPU2HIFI_MSG + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_CCPU2ACPU_MSG (MAILBOX_HEAD_ADDR_HIFI2CCPU_MSG + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_ACPU2CCPU_MSG (MAILBOX_HEAD_ADDR_CCPU2ACPU_MSG + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_CCPU2ACPU_IFC (MAILBOX_HEAD_ADDR_ACPU2CCPU_MSG + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_ACPU2CCPU_IFC (MAILBOX_HEAD_ADDR_CCPU2ACPU_IFC + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_CCPU2MCU_IFC (MAILBOX_HEAD_ADDR_ACPU2CCPU_IFC + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_MCU2CCPU_IFC (MAILBOX_HEAD_ADDR_CCPU2MCU_IFC + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_ADDR_ACPU2MCU_IFC (MAILBOX_HEAD_ADDR_MCU2CCPU_IFC + MAILBOX_HEAD_LEN)
+#define MAILBOX_HEAD_ADDR_MCU2ACPU_IFC (MAILBOX_HEAD_ADDR_ACPU2MCU_IFC + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_HEAD_BOTTOM_ADDR (MAILBOX_HEAD_ADDR_MCU2ACPU_IFC + MAILBOX_HEAD_LEN)
+
+#define MAILBOX_QUEUE_ADDR_MCU2ACPU_MSG (MAILBOX_MEM_BASEADDR + MAILBOX_MEM_HEAD_LEN)
+#define MAILBOX_QUEUE_ADDR_ACPU2MCU_MSG (MAILBOX_QUEUE_ADDR_MCU2ACPU_MSG + MAILBOX_QUEUE_SIZE(MCU, ACPU, MSG))
+
+#define MAILBOX_QUEUE_ADDR_ACPU2HIFI_MSG (MAILBOX_QUEUE_ADDR_ACPU2MCU_MSG + MAILBOX_QUEUE_SIZE(ACPU, MCU, MSG))
+#define MAILBOX_QUEUE_ADDR_HIFI2ACPU_MSG (MAILBOX_QUEUE_ADDR_ACPU2HIFI_MSG + MAILBOX_QUEUE_SIZE(ACPU, HIFI, MSG))
+
+#define MAILBOX_QUEUE_ADDR_MCU2CCPU_MSG (MAILBOX_QUEUE_ADDR_HIFI2ACPU_MSG + MAILBOX_QUEUE_SIZE(HIFI, ACPU, MSG))
+#define MAILBOX_QUEUE_ADDR_CCPU2MCU_MSG (MAILBOX_QUEUE_ADDR_MCU2CCPU_MSG + MAILBOX_QUEUE_SIZE(MCU, CCPU, MSG))
+
+#define MAILBOX_QUEUE_ADDR_CCPU2HIFI_MSG (MAILBOX_QUEUE_ADDR_CCPU2MCU_MSG + MAILBOX_QUEUE_SIZE(CCPU, MCU, MSG))
+#define MAILBOX_QUEUE_ADDR_HIFI2CCPU_MSG (MAILBOX_QUEUE_ADDR_CCPU2HIFI_MSG + MAILBOX_QUEUE_SIZE(CCPU, HIFI, MSG))
+
+#define MAILBOX_QUEUE_ADDR_CCPU2ACPU_MSG (MAILBOX_QUEUE_ADDR_HIFI2CCPU_MSG + MAILBOX_QUEUE_SIZE(HIFI, CCPU, MSG))
+#define MAILBOX_QUEUE_ADDR_ACPU2CCPU_MSG (MAILBOX_QUEUE_ADDR_CCPU2ACPU_MSG + MAILBOX_QUEUE_SIZE(CCPU, ACPU, MSG))
+
+#define MAILBOX_QUEUE_ADDR_CCPU2ACPU_IFC (MAILBOX_QUEUE_ADDR_ACPU2CCPU_MSG + MAILBOX_QUEUE_SIZE(ACPU, CCPU, MSG))
+#define MAILBOX_QUEUE_ADDR_ACPU2CCPU_IFC (MAILBOX_QUEUE_ADDR_CCPU2ACPU_IFC + MAILBOX_QUEUE_SIZE(CCPU, ACPU, IFC))
+
+#define MAILBOX_QUEUE_ADDR_CCPU2MCU_IFC (MAILBOX_QUEUE_ADDR_ACPU2CCPU_IFC + MAILBOX_QUEUE_SIZE(ACPU, CCPU, IFC))
+#define MAILBOX_QUEUE_ADDR_MCU2CCPU_IFC (MAILBOX_QUEUE_ADDR_CCPU2MCU_IFC + MAILBOX_QUEUE_SIZE(CCPU, MCU, IFC))
+
+#define MAILBOX_QUEUE_ADDR_ACPU2MCU_IFC (MAILBOX_QUEUE_ADDR_MCU2CCPU_IFC + MAILBOX_QUEUE_SIZE(MCU, CCPU, IFC))
+#define MAILBOX_QUEUE_ADDR_MCU2ACPU_IFC (MAILBOX_QUEUE_ADDR_ACPU2MCU_IFC + MAILBOX_QUEUE_SIZE(ACPU, MCU, IFC))
+
+#define MAILBOX_MEMORY_BOTTOM_ADDR (MAILBOX_QUEUE_ADDR_MCU2CCPU_IFC + MAILBOX_QUEUE_SIZE(MCU, ACPU, IFC))
+
+ int mailbox_memory_length(void);
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+#endif /* end of __DRV_MAILBOX_STUB_H__ */
diff --git a/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_table.c b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_table.c
new file mode 100644
index 000000000000..7a87d5d284c8
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/drv_mailbox_table.c
@@ -0,0 +1,22 @@
+#include "drv_mailbox_cfg.h"
+#include "drv_mailbox_gut.h"
+
+#undef _MAILBOX_FILE_
+#define _MAILBOX_FILE_ "table"
+MAILBOX_EXTERN struct mb_cfg g_mailbox_global_cfg_tbl[MAILBOX_GLOBAL_CHANNEL_NUM
+ + 1] = {
+ MAILBOX_CHANNEL_COMPOSE(CCPU, HIFI, MSG),
+
+ MAILBOX_CHANNEL_COMPOSE(ACPU, HIFI, MSG),
+
+ MAILBOX_CHANNEL_COMPOSE(HIFI, CCPU, MSG),
+ MAILBOX_CHANNEL_COMPOSE(HIFI, ACPU, MSG),
+
+ {MAILBOX_MAILCODE_INVALID, 0, 0, 0}
+
+};
+
+MAILBOX_EXTERN struct mb_buff
+ g_mailbox_channel_handle_pool[MAILBOX_CHANNEL_NUM];
+
+MAILBOX_EXTERN struct mb_cb g_mailbox_user_cb_pool[MAILBOX_USER_NUM];
diff --git a/drivers/hisi/hifi_mailbox/mailbox/mdrv_ipc_enum.h b/drivers/hisi/hifi_mailbox/mailbox/mdrv_ipc_enum.h
new file mode 100644
index 000000000000..c622a5994251
--- /dev/null
+++ b/drivers/hisi/hifi_mailbox/mailbox/mdrv_ipc_enum.h
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2012-2015. All rights reserved.
+ * foss@huawei.com
+ *
+ * If distributed as part of the Linux kernel, the following license terms
+ * apply:
+ *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License version 2 and
+ * * only version 2 as published by the Free Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * along with this program; if not, write to the Free Software
+ * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Otherwise, the following license terms apply:
+ *
+ * * Redistribution and use in source and binary forms, with or without
+ * * modification, are permitted provided that the following conditions
+ * * are met:
+ * * 1) Redistributions of source code must retain the above copyright
+ * * notice, this list of conditions and the following disclaimer.
+ * * 2) Redistributions in binary form must reproduce the above copyright
+ * * notice, this list of conditions and the following disclaimer in the
+ * * documentation and/or other materials provided with the distribution.
+ * * 3) Neither the name of Huawei nor the names of its contributors may
+ * * be used to endorse or promote products derived from this software
+ * * without specific prior written permission.
+ *
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef __MDRV_IPC_ENUM_H__
+#define __MDRV_IPC_ENUM_H__
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ typedef enum tagIPC_INT_CORE_E {
+ IPC_CORE_ARM11 = 0x0,
+ IPC_CORE_A9,
+ IPC_CORE_CEVA,
+ IPC_CORE_TENS0,
+ IPC_CORE_TENS1,
+ IPC_CORE_DSP,
+ IPC_CORE_APPARM = 0x0,
+ IPC_CORE_COMARM,
+ IPC_CORE_LTEDSP,
+ IPC_CORE_VDSP,
+ IPC_CORE_ZSP,
+ IPC_CORE_DSP_GU,
+ IPC_CORE_ACPU = 0x0,
+ IPC_CORE_CCPU,
+ IPC_CORE_MCU,
+ IPC_CORE_HIFI,
+ IPC_CORE_BBE16,
+ IPC_CORE_ACORE = 0x0,
+ IPC_CORE_CCORE,
+ IPC_CORE_MCORE,
+ IPC_CORE_LDSP,
+ IPC_CORE_HiFi,
+ IPC_CORE_BBE,
+ IPC_CORE0_A15,
+ IPC_CORE1_A15,
+ IPC_CORE_BUTTOM
+ } IPC_INT_CORE_E;
+
+#if defined(CHIP_BB_HI6210)
+ typedef enum tagIPC_INT_LEV_E {
+ IPC_INT_DSP_MODEM = 0,
+ IPC_CCPU_INT_SRC_HIFI_MSG = 1,
+ IPC_INT_DSP_MSP = 2,
+ IPC_INT_DSP_PS = 3,
+ IPC_CCPU_INT_SRC_MCU_MSG = 5,
+ IPC_CCPU_INT_SRC_ACPU_MSG = 6,
+ IPC_CCPU_INT_SRC_ACPU_IFC = 7,
+ IPC_INT_DSP_HALT = 8,
+ IPC_INT_DSP_RESUME = 9,
+ IPC_CCPU_INT_SRC_MCU_IFC = 10,
+ IPC_INT_WAKE_GU = 11,
+ IPC_INT_SLEEP_GU = 12,
+ IPC_INT_DICC_USRDATA_ACPU = 13,
+ IPC_INT_DICC_RELDATA_ACPU = 14,
+ IPC_INT_ARM_SLEEP = 15,
+ IPC_INT_WAKE_GSM = 16,
+ IPC_INT_WAKE_WCDMA = 17,
+ IPC_INT_DSP_PS_PUB_MBX = 18,
+ IPC_INT_DSP_PS_MAC_MBX = 19,
+ IPC_INT_DSP_MBX_RSD = 20,
+ IPC_CCPU_INT_SRC_DSP_MNTN = 21,
+ IPC_CCPU_INT_SRC_DSP_RCM_PUB_MBX = 22, /* bit22 */
+ IPC_CCPU_INT_SRC_ACPU_ICC = 30,
+ IPC_CCPU_INT_SDR_CCPU_BBP_MASTER_ERROR = 31,
+ IPC_CCPU_INT_SRC_BUTT = 32,
+
+ IPC_MCU_INT_SRC_ACPU_MSG = 4,
+ IPC_MCU_INT_SRC_CCPU_MSG = 5,
+ IPC_MCU_INT_SRC_HIFI_MSG = 6,
+ IPC_MCU_INT_SRC_CCPU_IFC = 7,
+ IPC_MCU_INT_SRC_CCPU_IPF = 8,
+ IPC_MCU_INT_SRC_ACPU_IFC = 9,
+ IPC_MCU_INT_SRC_ACPU0_PD = 10,
+ IPC_MCU_INT_SRC_ACPU1_PD = 11, /* bit11, acpu1 power down */
+ IPC_MCU_INT_SRC_ACPU2_PD = 12, /* bit12, acpu2 power down */
+ IPC_MCU_INT_SRC_ACPU3_PD = 13, /* bit13, acpu3 power down */
+ IPC_MCU_INT_SRC_ACPU_HOTPLUG = 14, /* bit14, acpu hotplug--no use */
+ IPC_MCU_INT_SRC_ACPU_DFS = 15, /* bit15, ACPU DFS */
+ IPC_MCU_INT_SRC_ACPU_PD = 16, /* bit16, acpu power down */
+ IPC_MCU_INT_SRC_CCPU_PD = 17, /* bit17, ccpu power down */
+ IPC_MCU_INT_SRC_HIFI_PD = 18, /* bit18, hifi power down */
+ IPC_MCU_INT_SRC_MCU_AGT = 19, /* bit19, mcu agent */
+ IPC_MCU_INT_SRC_HIFI_DDR_VOTE = 20, /* bit20, HIFI DDR */
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_SLOW = 21, /* bit21, ACPU */
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_SLEEP = 22, /* bit22, ACPU */
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_INVALID = 23, /* bit23, ACPU */
+ IPC_MCU_INT_SRC_HIFI_MEMSHARE_DDR_VOTE = 24, /* bit24, MP3 */
+ IPC_MCU_INT_SRC_HIFI_MEMSHARE_DDR_EXIT_VOTE = 25, /* bit25, MP3 */
+ IPC_MCU_INT_SRC_ACPU4_PD = 26, /* bit26, acpu4:cluster1 core0 power down */
+ IPC_MCU_INT_SRC_ACPU5_PD = 27, /* bit27, acpu5:cluster1 core1 power down */
+ IPC_MCU_INT_SRC_ACPU6_PD = 28, /* bit28, acpu6:cluster1 core2 power down */
+ IPC_MCU_INT_SRC_ACPU7_PD = 29, /* bit29, acpu7:cluster1 core3 power down */
+ IPC_MCU_INT_SRC_HIFI_IFC = 31, /* bit31, HIFI */
+ IPC_MCU_INT_SRC_BUTT = 32,
+
+ IPC_ACPU_INT_SRC_CCPU_MSG = 1, /* bit1, CCPU */
+ IPC_ACPU_INT_SRC_HIFI_MSG = 2, /* bit2, HIFI */
+ IPC_ACPU_INT_SRC_MCU_MSG = 3, /* bit3, ACPU */
+ IPC_ACPU_INT_SRC_CCPU_NVIM = 4, /* bit4, */
+ IPC_ACPU_INT_SRC_CCPU_IFC = 5, /* bit5, CCPU */
+ IPC_ACPU_INT_SRC_MCU_IFC = 6, /* bit6, MCU */
+ IPC_ACPU_INT_SRC_MCU_THERMAL_HIGH = 7, /* bit7, MCU */
+ IPC_ACPU_INT_SRC_MCU_THERMAL_LOW = 8, /* bit8, MCU ACPU */
+ IPC_INT_DSP_APP = 9, /* bit9, LDSP */
+ IPC_ACPU_INT_SRC_HIFI_PC_VOICE_RX_DATA = 10, /* hifi->acore pc voice */
+ IPC_INT_DICC_USRDATA = 13, /* bit13, TTF */
+ IPC_INT_DICC_RELDATA = 14, /* bit14, TTF IPC_INT_DICC_RELDATA_ACPU */
+ IPC_ACPU_INT_SRC_CCPU_LOG = 25, /* bit25, CCPU ACPU LOG */
+ IPC_ACPU_INI_SRC_MCU_TELE_MNTN_NOTIFY = 26, /* bit26, TELE_MNTN */
+ IPC_ACPU_INI_SRC_MCU_EXC_REBOOT = 27, /* bit27, MCU */
+ IPC_ACPU_INT_SRC_CCPU_EXC_REBOOT = 28, /* bit28, CCPU */
+ IPC_ACPU_INT_SRC_CCPU_NORMAL_REBOOT = 29, /* bit29, CCPU */
+ IPC_ACPU_INT_SRC_MCU_DDR_EXC = 30, /* bit30, MCU DDR */
+ IPC_ACPU_INT_SRC_CCPU_ICC = 31, /* bit31, CCPU ICC */
+ IPC_ACPU_INT_SRC_BUTT = 32,
+
+ IPC_HIFI_INT_SRC_ACPU_MSG = 0, /* bit0, ACPU */
+ IPC_HIFI_INT_SRC_CCPU_MSG = 1, /* bit1, CCPU */
+ IPC_HIFI_INT_SRC_BBE_MSG = 4, /* bit4, TDDSP */
+ IPC_HIFI_INT_SRC_MCU_MSG = 6, /* bit6, MCU */
+ IPC_HIFI_INT_SRC_MCU_WAKE_DDR = 7, /* bit7, MCU DDR */
+ IPC_HIFI_INT_SRC_MCU_IFC = 8, /* bit8, MCU */
+ IPC_HIFI_INT_SRC_BUTT = 32,
+
+ IPC_INT_MSP_DSP_OM_MBX = 0, /* bit0, ARM->DSP */
+ IPC_INT_PS_DSP_PUB_MBX = 1, /* bit1, ARM->DSP */
+ IPC_INT_PS_DSP_MAC_MBX = 2, /* bit2, ARM->DSP */
+ IPC_INT_HIFI_DSP_MBX = 3, /* bit3, HIFI->DSP */
+ IPC_BBE16_INT_SRC_HIFI_MSG = 3, /* bit3, */
+ IPC_BBE16_INT_SRC_BUTT = 32,
+ IPC_INT_BUTTOM = 32
+ } IPC_INT_LEV_E;
+
+ typedef enum tagIPC_SEM_ID_E {
+ IPC_SEM_ICC = 0,
+ IPC_SEM_NAND = 1,
+ IPC_SEM_MEM = 2,
+ IPC_SEM_DICC = 3,
+ IPC_SEM_RFILE_LOG = 4,
+ IPC_SEM_EMMC = 5,
+ IPC_SEM_NVIM = 6,
+ IPC_SEM_TELE_MNTN = 7,
+ IPC_SEM_MEDPLL_STATE = 8,
+ IPC_SEM_EFUSE = 9,
+ IPC_SEM_BBPMASTER_0 = 10,
+ IPC_SEM_BBPMASTER_1 = 11,
+ IPC_SEM_BBPMASTER_2 = 12,
+ IPC_SEM_BBPMASTER_3 = 13,
+ IPC_SEM_GU_SLEEP = 14,
+ IPC_SEM_BBPMASTER_5 = 15,
+ IPC_SEM_BBPMASTER_6 = 16,
+ IPC_SEM_BBPMASTER_7 = 17,
+ IPC_SEM_DPDT_CTRL_ANT = 19, /* kept only so the portland build compiles */
+ IPC_SEM_SMP_CPU0 = 21,
+ IPC_SEM_SMP_CPU1 = 22,
+ IPC_SEM_SMP_CPU2 = 23,
+ IPC_SEM_SMP_CPU3 = 24,
+ IPC_SEM_SYNC = 25,
+ IPC_SEM_BBP = 26,
+ IPC_SEM_CPUIDLE = 27,
+ IPC_SEM_BBPPS = 28,
+ IPC_SEM_HKADC = 29,
+ IPC_SEM_SYSCTRL = 30,
+ IPC_SEM_ZSP_HALT = 31,
+ IPC_SEM_BUTTOM
+ } IPC_SEM_ID_E;
+
+#else
+ typedef enum tagIPC_INT_LEV_E {
+ IPC_CCPU_INT_SRC_HIFI_MSG = 0,
+ IPC_CCPU_INT_SRC_MCU_MSG = 1,
+ IPC_INT_DSP_HALT = 2,
+ IPC_INT_DSP_RESUME = 3,
+ IPC_INT_WAKE_GU = 6,
+ IPC_INT_SLEEP_GU,
+ IPC_INT_WAKE_GSM,
+ IPC_INT_WAKE_WCDMA,
+ IPC_INT_DSP_PS_PUB_MBX,
+ IPC_INT_DSP_PS_MAC_MBX,
+ IPC_INT_DSP_MBX_RSD,
+ IPC_CCPU_INT_SRC_DSP_MNTN,
+ IPC_CCPU_INT_SRC_DSP_RCM_PUB_MBX,
+ IPC_CCPU_INT_SDR_CCPU_BBP_MASTER_ERROR,
+ IPC_CCPU_INT_SRC_COMMON_END,
+
+ IPC_CCPU_INT_SRC_ACPU_RESET = IPC_CCPU_INT_SRC_COMMON_END,
+ IPC_CCPU_SRC_ACPU_DUMP,
+ IPC_CCPU_INT_SRC_ICC_PRIVATE,
+ IPC_CCPU_INT_SRC_MCPU,
+ IPC_CCPU_INT_SRC_MCPU_WDT,
+ IPC_CCPU_INT_SRC_XDSP_1X_HALT,
+ IPC_CCPU_INT_SRC_XDSP_HRPD_HALT,
+ IPC_CCPU_INT_SRC_XDSP_1X_RESUME,
+ IPC_CCPU_INT_SRC_XDSP_HRPD_RESUME,
+ IPC_CCPU_INT_SRC_XDSP_MNTN,
+ IPC_CCPU_INT_SRC_XDSP_PS_MBX,
+ IPC_CCPU_INT_SRC_XDSP_RCM_MBX,
+
+ IPC_CCPU_INT_SRC_ACPU_ICC = 31,
+
+ IPC_CCPU_INT_SRC_ACPU_MSG = IPC_CCPU_INT_SRC_COMMON_END,
+ IPC_CCPU_INT_SRC_ACPU_IFC,
+ IPC_CCPU_INT_SRC_MCU_IFC,
+ IPC_INT_ARM_SLEEP,
+
+ IPC_MCU_INT_SRC_ACPU_MSG = 0,
+ IPC_MCU_INT_SRC_CCPU_MSG,
+ IPC_MCU_INT_SRC_HIFI_MSG,
+ IPC_MCU_INT_SRC_CCPU_IPF,
+ IPC_MCU_INT_SRC_ACPU_PD, /* acpu power down */
+ IPC_MCU_INT_SRC_HIFI_PD, /* hifi power down */
+ IPC_MCU_INT_SRC_HIFI_DDR_VOTE,
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_SLOW,
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_SLEEP,
+ IPC_MCU_INT_SRC_ACPU_I2S_REMOTE_INVALID,
+ IPC_MCU_INT_SRC_COMMON_END,
+ IPC_MCU_INT_SRC_ACPU_DRX = IPC_MCU_INT_SRC_COMMON_END,
+ IPC_MCU_INT_SRC_CCPU_DRX,
+ IPC_MCU_INT_SRC_ICC_PRIVATE,
+ IPC_MCU_INT_SRC_DUMP,
+ IPC_MCU_INT_SRC_HIFI_PU,
+ IPC_MCU_INT_SRC_HIFI_DDR_DFS_QOS,
+ IPC_MCU_INT_SRC_TEST,
+ IPC_MCPU_INT_SRC_ACPU_USB_PME_EN,
+ IPC_MCU_INT_SRC_ICC = 29,
+ IPC_MCU_INT_SRC_CCPU_PD = 30,
+ IPC_MCU_INT_SRC_CCPU_START = 31,
+
+ IPC_MCU_INT_SRC_CCPU_IFC = IPC_MCU_INT_SRC_COMMON_END,
+ IPC_MCU_INT_SRC_ACPU_IFC,
+ IPC_MCU_INT_SRC_HIFI_IFC,
+ IPC_MCU_INT_SRC_ACPU0_PD, /* acpu0 power down, */
+ IPC_MCU_INT_SRC_ACPU1_PD, /* acpu1 power down, */
+ IPC_MCU_INT_SRC_ACPU2_PD, /* acpu2 power down, */
+ IPC_MCU_INT_SRC_ACPU3_PD, /* acpu3 power down , */
+ IPC_MCU_INT_SRC_ACPU4_PD, /* acpu4:cluster1 core0 power down */
+ IPC_MCU_INT_SRC_ACPU5_PD, /* acpu5:cluster1 core1 power down */
+ IPC_MCU_INT_SRC_ACPU6_PD, /* acpu6:cluster1 core2 power down */
+ IPC_MCU_INT_SRC_ACPU7_PD, /* acpu7:cluster1 core3 power down */
+ IPC_MCU_INT_SRC_ACPU_HOTPLUG, /* acpu hotplug--no use, */
+ IPC_MCU_INT_SRC_MCU_AGT, /* mcu agent */
+ IPC_MCU_INT_SRC_HIFI_MEMSHARE_DDR_VOTE,
+ IPC_MCU_INT_SRC_HIFI_MEMSHARE_DDR_EXIT_VOTE,
+ IPC_MCU_INT_SRC_ACPU_DFS, /*ACPU DFS */
+
+ IPC_MCU_INT_SRC_END,
+
+ IPC_ACPU_INT_SRC_CCPU_MSG = 0,
+ IPC_ACPU_INT_SRC_HIFI_MSG = 1,
+ IPC_ACPU_INT_SRC_MCU_MSG = 2,
+ IPC_ACPU_INT_SRC_CCPU_NVIM = 3,
+ IPC_INT_DICC_USRDATA = 4,
+ IPC_INT_DICC_RELDATA = 5,
+ IPC_ACPU_INT_SRC_CCPU_ICC,
+ IPC_ACPU_INT_SRC_COMMON_END,
+ IPC_ACPU_INT_SRC_ICC_PRIVATE = IPC_ACPU_INT_SRC_COMMON_END,
+ IPC_ACPU_SRC_CCPU_DUMP,
+ IPC_ACPU_INT_SRC_MCPU,
+ IPC_ACPU_INT_SRC_MCPU_WDT,
+ IPC_ACPU_INT_MCU_SRC_DUMP,
+ IPC_ACPU_INT_SRC_CCPU_RESET_IDLE,
+ IPC_ACPU_INT_SRC_CCPU_RESET_SUCC,
+
+ IPC_ACPU_INT_SRC_CCPU_LOG,
+ IPC_ACPU_INT_SRC_MCU_FOR_TEST, /* test, m core to acore */
+ IPC_ACPU_INT_SRC_CCPU_TEST_ENABLE,
+ IPC_ACPU_INT_SRC_MCPU_USB_PME,
+ IPC_ACPU_INT_SRC_HIFI_PC_VOICE_RX_DATA, /* hifi->acore pc voice */
+ IPC_ACPU_INT_SRC_CCPU_PM_OM,
+
+ IPC_ACPU_INT_SRC_CCPU_IFC = IPC_ACPU_INT_SRC_COMMON_END,
+ IPC_ACPU_INT_SRC_MCU_IFC,
+ IPC_ACPU_INT_SRC_MCU_THERMAL_HIGH,
+ IPC_ACPU_INT_SRC_MCU_THERMAL_LOW,
+ IPC_ACPU_INI_SRC_MCU_TELE_MNTN_NOTIFY,
+ IPC_ACPU_INI_SRC_MCU_EXC_REBOOT,
+ IPC_ACPU_INT_SRC_CCPU_EXC_REBOOT,
+ IPC_ACPU_INT_SRC_CCPU_NORMAL_REBOOT,
+ IPC_ACPU_INT_SRC_MCU_DDR_EXC,
+
+ IPC_ACPU_INT_SRC_END,
+
+ IPC_HIFI_INT_SRC_ACPU_MSG = 0,
+ IPC_HIFI_INT_SRC_CCPU_MSG,
+ IPC_HIFI_INT_SRC_BBE_MSG,
+ IPC_HIFI_INT_SRC_MCU_MSG,
+ IPC_HIFI_INT_SRC_COMMON_END,
+
+ IPC_HIFI_INT_SRC_MCU_WAKE_DDR = IPC_HIFI_INT_SRC_COMMON_END,
+ IPC_HIFI_INT_SRC_MCU_IFC,
+
+ IPC_HIFI_INT_SRC_END,
+
+ IPC_INT_MSP_DSP_OM_MBX = 0,
+ IPC_INT_PS_DSP_PUB_MBX,
+ IPC_INT_PS_DSP_MAC_MBX,
+ IPC_INT_HIFI_DSP_MBX, /* HIFI->DSP */
+ IPC_BBE16_INT_SRC_HIFI_MSG,
+
+ IPC_BBE16_INT_SRC_END,
+
+ IPC_XDSP_INT_SRC_CCPU_1X_WAKE = 0,
+ IPC_XDSP_INT_SRC_CCPU_HRPD_WAKE,
+ IPC_XDSP_INT_SRC_CCPU_OM_MBX,
+ IPC_XDSP_INT_SRC_CCPU_PUB_MBX,
+
+ IPC_XDSP_INT_SRC_END,
+
+ IPC_INT_BUTTOM = 32,
+ } IPC_INT_LEV_E;
+
+ typedef enum tagIPC_SEM_ID_E {
+ IPC_SEM_MEM,
+ IPC_SEM_DICC,
+ IPC_SEM_EMMC,
+ IPC_SEM_SYNC,
+ IPC_SEM_SYSCTRL,
+ IPC_SEM_BBP,
+ IPC_SEM_RFILE_LOG,
+ IPC_SEM_NVIM,
+ IPC_SEM_EFUSE,
+ IPC_SEM_DPDT_CTRL_ANT,
+ IPC_SEM_BBPMASTER_0,
+ IPC_SEM_BBPMASTER_1,
+ IPC_SEM_BBPMASTER_2,
+ IPC_SEM_BBPMASTER_3,
+ IPC_SEM_BBPMASTER_5,
+ IPC_SEM_BBPMASTER_6,
+ IPC_SEM_BBPMASTER_7,
+ IPC_SEM_COMMON_END,
+
+ IPC_SEM_SPI0 = IPC_SEM_COMMON_END,
+ IPC_SEM_NV,
+ IPC_SEM_GPIO,
+ IPC_SEM_CLK,
+ IPC_SEM_PMU,
+ IPC_SEM_MTCMOS,
+ IPC_SEM_IPF_PWCTRL,
+ IPC_SEM_PMU_FPGA,
+ IPC_SEM_NV_CRC,
+ IPC_SEM_PM_OM_LOG,
+ IPC_SEM_MDRV_LOCK,
+ IPC_SEM_CDMA_DRX,
+ IPC_SEM_GU_SLEEP,
+ IPC_SEM2_IPC_TEST,
+
+ IPC_SEM_ICC = IPC_SEM_COMMON_END,
+ IPC_SEM_NAND,
+ IPC_SEM_TELE_MNTN,
+ IPC_SEM_MEDPLL_STATE,
+ IPC_SEM_SMP_CPU0,
+ IPC_SEM_SMP_CPU1,
+ IPC_SEM_SMP_CPU2,
+ IPC_SEM_SMP_CPU3,
+ IPC_SEM_CPUIDLE,
+ IPC_SEM_BBPPS,
+ IPC_SEM_HKADC,
+
+ IPC_SEM_END,
+
+ IPC_SEM_BUTTOM = 32
+ } IPC_SEM_ID_E;
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/hisi/mailbox/Kconfig b/drivers/hisi/mailbox/Kconfig
new file mode 100644
index 000000000000..a05201403947
--- /dev/null
+++ b/drivers/hisi/mailbox/Kconfig
@@ -0,0 +1,7 @@
+config HISILICON_PLATFORM_MAILBOX
+ bool "HISILICON PLATFORM MAILBOX"
+ default n
+
+if HISILICON_PLATFORM_MAILBOX
+source "drivers/hisi/mailbox/hisi_mailbox/Kconfig"
+endif
diff --git a/drivers/hisi/mailbox/Makefile b/drivers/hisi/mailbox/Makefile
new file mode 100644
index 000000000000..1b21b7f2b66f
--- /dev/null
+++ b/drivers/hisi/mailbox/Makefile
@@ -0,0 +1 @@
+obj-y += hisi_mailbox/
diff --git a/drivers/hisi/mailbox/hisi_mailbox/Kconfig b/drivers/hisi/mailbox/hisi_mailbox/Kconfig
new file mode 100644
index 000000000000..3e25be5f7a2d
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/Kconfig
@@ -0,0 +1,44 @@
+comment "hisi ipc MailBox driver"
+config HISI_MAILBOX
+ bool "hisi ipc MailBox driver"
+ default n
+ help
+ HiSilicon IPC mailbox driver for communication between the on-chip processors.
+
+comment "hisi ipc mailboxes debugfs user"
+config HISI_MAILBOX_DEBUGFS
+ bool "hisi ipc mailboxes debugfs user"
+ depends on (HISI_MAILBOX) && DEBUG_FS
+ default n
+ help
+ Debugfs user interface for the HiIPCV230 IPC mailboxes.
+
+comment "hisi ipc MailBox driver performance debug"
+config HISI_MAILBOX_PERFORMANCE_DEBUG
+ bool "hisi ipc MailBox driver performance debug"
+ depends on (HISI_MAILBOX)
+ default n
+ help
+ Print the send-task timestamps (TTS) when this option is set to y.
+
+comment "hisi kernel API about remote processor communication"
+config HISI_RPROC
+ bool "hisi ipc kernel API"
+ default n
+ help
+ HiSilicon kernel API for remote processor communication.
+
+comment "hisi rproc kernel API debugfs"
+config HISI_RPROC_TESTFILE
+ bool "hisi rproc kernel API test file"
+ depends on HISI_RPROC
+ default n
+ help
+ Debugfs test file for the hisi_rproc API.
+
+comment "bsp_reset_core_notify to support modem compile"
+config BSP_RESET_CORE_NOTIFY
+ bool "bsp_reset_core_notify"
+ default n
+ help
+ Provide bsp_reset_core_notify to support the modem build.
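+
+# Illustrative defconfig fragment (example values only):
+#   CONFIG_HISI_MAILBOX=y
+#   CONFIG_HISI_RPROC=y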
diff --git a/drivers/hisi/mailbox/hisi_mailbox/Makefile b/drivers/hisi/mailbox/hisi_mailbox/Makefile
new file mode 100644
index 000000000000..4a4233a73710
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/Makefile
@@ -0,0 +1,6 @@
+obj-y += hisi_mailbox.o
+obj-y += hisi_rproc.o
+obj-$(CONFIG_HISI_RPROC_TESTFILE) += hisi_rproc_test.o
+obj-$(CONFIG_HISI_MAILBOX) += hisi_mailbox_dev.o
+
+
diff --git a/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox.c b/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox.c
new file mode 100644
index 000000000000..dfac019d8bc2
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox.c
@@ -0,0 +1,1012 @@
+/*
+ * mailbox core driver
+ *
+ * Copyright (c) 2013- Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/sched/rt.h>
+#include <linux/kthread.h>
+#include <linux/hisi/hisi_mailbox.h>
+
+#define MBOX_PR_ERR(fmt, args ...) \
+ ({ \
+ pr_err("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })
+#define MBOX_PR_INFO(fmt, args ...) \
+ ({ \
+ pr_info("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })
+#define MBOX_PR_DEBUG(fmt, args ...) \
+ ({ \
+ ; \
+ })
+
+#define MODULE_NAME "hisi_mailbox"
+
+/* #define HISI_MAILBOX_DBG */ /* enable this only during early bring-up */
+
+/* debugfs timestamps (TTS) for IPC performance profiling */
+#define TASK_DEBUG_ON(tx_task) do {} while (0)
+#define TASK_DEBUG_OFF(tx_task) do {} while (0)
+#define START_TTS(tx_task) do {} while (0)
+#define SEND_TTS(tx_task) do {} while (0)
+#define RECEIVE_TTS(tx_task) do {} while (0)
+#define BH_TTS(tx_task) do {} while (0)
+#define COMPLETE_TTS(tx_task) do {} while (0)
+#define PRINT_TTS(tx_task) do {} while (0)
+
+#define TX_FIFO_CELL_SIZE (sizeof(struct hisi_mbox_task *))
+#define MAILBOX_MAX_TX_FIFO 256
+/* tx_thread warning level: BUG_ON when tx_thread is blocked for some reason */
+#define TX_THREAD_BUFFER_WARN_LEVEL (156 * TX_FIFO_CELL_SIZE)
+
+enum { NOCOMPLETION = 0, COMPLETING, COMPLETED };
+enum { TX_TASK = 0, RX_TASK };
+
+spinlock_t g_task_buffer_lock;
+struct hisi_mbox_task *g_TxTaskBuffer;
+/* g_ContinuousFailCnt throttles reporting of continuous IPC timeouts, which could otherwise flood the kmsg log */
+int g_ContinuousFailCnt;
+/* mailbox device resource pool */
+static LIST_HEAD(mdevices);
+
+
+struct hisi_mbox_task *hisi_mbox_node_alloc(void)
+{
+ int index = 0;
+ struct hisi_mbox_task *ptask = g_TxTaskBuffer;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&g_task_buffer_lock, flags);
+ for (index = 0; index < TX_TASK_DDR_NODE_NUM; index++) {
+ if (TX_TASK_DDR_NODE_AVA == ptask->tx_buffer[0])
+ break;
+ ptask++;
+ }
+
+ if (likely(TX_TASK_DDR_NODE_NUM != index))
+ ptask->tx_buffer[0] = TX_TASK_DDR_NODE_OPY; /* mark the node as occupied */
+ else
+ ptask = NULL;
+
+ spin_unlock_irqrestore(&g_task_buffer_lock, flags);
+
+ return ptask;
+}
+
+void hisi_mbox_task_free(struct hisi_mbox_task **tx_task)
+{
+ unsigned long flags = 0;
+
+ if ((NULL == tx_task) || (NULL == *tx_task)) {
+ MBOX_PR_ERR("null pointer\n");
+ return;
+ }
+ spin_lock_irqsave(&g_task_buffer_lock, flags);
+ /* tx_buffer[0] is used as the availability flag */
+ (*tx_task)->tx_buffer[0] = TX_TASK_DDR_NODE_AVA;
+ spin_unlock_irqrestore(&g_task_buffer_lock, flags);
+
+ return;
+}
+
+EXPORT_SYMBOL(hisi_mbox_task_free);
+
+struct hisi_mbox_task *hisi_mbox_task_alloc(struct hisi_mbox *mbox,
+ mbox_msg_t *tx_buffer, mbox_msg_len_t tx_buffer_len, int need_auto_ack)
+{
+ struct hisi_mbox_task *tx_task = NULL;
+
+ if (!mbox || !mbox->tx || !tx_buffer) {
+ if (!mbox)
+ MBOX_PR_ERR("null pointer mbox!\n");
+ else
+ MBOX_PR_ERR("mailbox-%d no tx ability or no tx_buffer\n", mbox->mdev_index);
+ goto out;
+ }
+ tx_task = hisi_mbox_node_alloc();
+ if (!tx_task) {
+ MBOX_PR_ERR("tx task no mem\n");
+ goto out;
+ }
+ memcpy((void *)tx_task->tx_buffer, (void *)tx_buffer, tx_buffer_len * (sizeof(mbox_msg_t)));
+ tx_task->tx_buffer_len = tx_buffer_len;
+ tx_task->need_auto_ack = need_auto_ack;
+
+ if (MBOX_IS_DEBUG_ON(mbox))
+ TASK_DEBUG_ON(tx_task);
+ else
+ TASK_DEBUG_OFF(tx_task);
+out:
+ return tx_task;
+}
+
+EXPORT_SYMBOL(hisi_mbox_task_alloc);
+
+static inline int set_status(struct hisi_mbox_device *mdev, int status)
+{
+ int ret = 0;
+
+ spin_lock(&mdev->status_lock);
+ if ((MDEV_DEACTIVATED & mdev->status)) {
+ spin_unlock(&mdev->status_lock);
+ MBOX_PR_INFO("an unexpected ipc caused by %s\n", mdev->name);
+ /* WARN_ON(1); */
+ ret = -ENODEV;
+ goto out;
+ } else if ((MDEV_DEACTIVATED & status)) {
+ mdev->status |= status;
+
+ while ((MDEV_SYNC_SENDING & mdev->status) || (MDEV_ASYNC_ENQUEUE & mdev->status)) {
+ spin_unlock(&mdev->status_lock);
+ msleep(5);
+ spin_lock(&mdev->status_lock);
+ }
+ } else {
+ mdev->status |= status;
+ }
+
+ spin_unlock(&mdev->status_lock);
+out:
+ return ret;
+}
+
+static inline void clr_status(struct hisi_mbox_device *mdev, int status)
+{
+ spin_lock(&mdev->status_lock);
+ mdev->status &= ~status;
+ spin_unlock(&mdev->status_lock);
+ return;
+}
+
+static int hisi_mbox_task_send_async(struct hisi_mbox_device *mdev, struct hisi_mbox_task *tx_task)
+{
+ int ret = 0;
+
+ mdev->ops->ensure_channel(mdev);
+
+ mdev->tx_task = tx_task;
+ ret = mdev->ops->send(mdev, tx_task->tx_buffer, tx_task->tx_buffer_len, tx_task->need_auto_ack);
+ if (ret) {
+ MBOX_PR_ERR("mdev %s can not be sent\n", mdev->name);
+ }
+ mdev->tx_task = NULL;
+ return ret;
+}
+
+static int hisi_mbox_task_send_sync(struct hisi_mbox_device *mdev, struct hisi_mbox_task *tx_task)
+{
+ unsigned long tx_timeout = 0;
+ long timeout;
+ int ret = 0;
+ mbox_msg_t *rx_buffer = NULL;
+ mbox_msg_len_t rx_len = 0;
+ int wait = 0;
+ int need_irq_enable = 0;
+ int receipted = 0;
+ unsigned long flags;
+ unsigned int mdev_timeout = 0;
+ mdev_timeout = mdev->ops->get_timeout(mdev);
+
+ SEND_TTS(tx_task);
+
+ mdev->ops->ensure_channel(mdev);
+
+ mdev->complete.done = 0;
+ mdev->completed = NOCOMPLETION;
+ mdev->tx_task = tx_task;
+
+ ret = mdev->ops->send(mdev, tx_task->tx_buffer, tx_task->tx_buffer_len, tx_task->need_auto_ack);
+ if (ret) {
+ mdev->tx_task = NULL;
+ MBOX_PR_ERR("mdev %s can not be sent\n", mdev->name);
+ goto out;
+ }
+ if (AUTO_ACK == tx_task->need_auto_ack) {
+ tx_timeout = msecs_to_jiffies(mdev_timeout);
+ } else if (MANUAL_ACK == tx_task->need_auto_ack) {
+ tx_timeout = msecs_to_jiffies(mdev_timeout);
+ } else {
+ MBOX_PR_ERR("%s invalid ack mode.\n", mdev->name);
+ goto refresh;
+ }
+
+ timeout = wait_for_completion_timeout(&mdev->complete, tx_timeout);
+ if (unlikely(0 == timeout)) {
+ g_ContinuousFailCnt++;
+ if (CONTINUOUS_FAIL_JUDGE) {
+ MBOX_PR_ERR("\n %s ipc timeout...\n" "<INFO> MSG[0] : 0x%08x\n" "<INFO> MSG[1] : 0x%08x\n" "<INFO> fifo : %d\n",
+ mdev->name, tx_task->tx_buffer[0], tx_task->tx_buffer[1], (int)(kfifo_len(&mdev->fifo) / TX_FIFO_CELL_SIZE));
+
+ if (mdev->ops->status)
+ mdev->ops->status(mdev);
+ }
+ /* Synchronization for isr. */
+ spin_lock_irqsave(&mdev->complete_lock, flags);
+
+ switch (mdev->completed) {
+ case NOCOMPLETION:
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("NOCOMPLETION.\n");
+ mdev->completed = COMPLETED;
+ break;
+
+ case COMPLETING:
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("COMPLETING.\n");
+ /*
+ * Wait up to 50ms for ACK reception in the bottom half.
+ * Both ACK reception and IRQ restore will be handled
+ * here instead of in the bottom half, in case the
+ * tasklet is jammed on the IRQ-affinity core.
+ */
+ wait = 10;
+ do {
+ spin_unlock_irqrestore(&mdev->complete_lock, flags);
+ msleep(5);
+ spin_lock_irqsave(&mdev->complete_lock, flags);
+ } while (mdev->completed != COMPLETED && wait--);
+
+ /* Tasklet jam */
+ if (mdev->completed != COMPLETED) {
+ mdev->completed = COMPLETED;
+ need_irq_enable = 1;
+ break;
+ }
+
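+ /* fall through */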
+ case COMPLETED:
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("COMPLETED.\n");
+ receipted = 1;
+
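+ /* fall through */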
+ default:
+ goto unlock;
+ }
+
+ /* Handle ack & irq. */
+ if (mdev->ops->is_stm(mdev, ACK_STATUS) || need_irq_enable) {
+ rx_len = mdev->ops->recv(mdev, &rx_buffer);
+ tx_task->ack_buffer = rx_buffer;
+ tx_task->ack_buffer_len = rx_len;
+ receipted = 1;
+
+ if (need_irq_enable)
+ mdev->ops->enable_irq(mdev);
+ } else {
+ /* ACK lost */
+ ret = -ETIMEOUT;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&mdev->complete_lock, flags);
+
+ if (receipted && need_irq_enable) {
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("reason: TASKLET jam.\n");
+ } else if (receipted && !need_irq_enable) {
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("reason: ISR jam.\n");
+ } else if (!receipted) {
+ if (CONTINUOUS_FAIL_JUDGE)
+ MBOX_PR_ERR("reason: ACK lost.\n");
+ }
+ } else {
+ /* on success, clear g_ContinuousFailCnt */
+ g_ContinuousFailCnt = 0;
+ }
+
+refresh:
+ mdev->ops->refresh(mdev);
+out:
+ /* completion */
+ MBOX_PR_DEBUG("mdev %s completion\n", mdev->name);
+ mdev->tx_task = NULL;
+ COMPLETE_TTS(tx_task);
+
+ return ret;
+}
+
+int hisi_mbox_msg_send_sync(struct hisi_mbox *mbox,
+ mbox_msg_t *tx_buffer, mbox_msg_len_t tx_buffer_len,
+ int need_auto_ack, mbox_msg_t *ack_buffer, mbox_msg_len_t ack_buffer_len)
+{
+ struct hisi_mbox_device *mdev;
+ struct hisi_mbox_task tx_task;
+ struct hisi_mbox_task *p_tx_task = NULL;
+ int ret = 0;
+
+ if (!mbox || !mbox->tx || !tx_buffer) {
+ if (!mbox)
+ MBOX_PR_ERR("null pointer mbox!\n");
+ else
+ MBOX_PR_ERR("mailbox-%d no tx ability or no tx_buffer\n", mbox->mdev_index);
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy((void *)tx_task.tx_buffer, (void *)tx_buffer, tx_buffer_len * (sizeof(mbox_msg_t)));
+ tx_task.tx_buffer_len = tx_buffer_len;
+ tx_task.need_auto_ack = need_auto_ack;
+
+ mdev = mbox->tx;
+ p_tx_task = &tx_task;
+
+ START_TTS(p_tx_task);
+
+ /* SYNC_SENDING start */
+ ret = set_status(mdev, MDEV_SYNC_SENDING);
+ if (ret) {
+ MBOX_PR_ERR("MSG{0x%08x, 0x%08x}\n", tx_task.tx_buffer[0], tx_task.tx_buffer[1]);
+ goto out;
+ }
+
+ /* send */
+ mutex_lock(&mdev->dev_lock);
+ ret = hisi_mbox_task_send_sync(mdev, &tx_task);
+ if (!ret && ack_buffer) {
+ memcpy((void *)ack_buffer, (void *)tx_task.ack_buffer, sizeof(mbox_msg_t) / sizeof(u8) * ack_buffer_len);
+ }
+
+ PRINT_TTS(p_tx_task);
+ mutex_unlock(&mdev->dev_lock);
+
+ /* SYNC_SENDING end */
+ clr_status(mdev, MDEV_SYNC_SENDING);
+
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_mbox_msg_send_sync);
+
+int hisi_mbox_msg_send_async(struct hisi_mbox *mbox, struct hisi_mbox_task *tx_task)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ int ret = 0;
+ unsigned long flags;
+ unsigned int hardware_board_type;
+ unsigned int fifo_size;
+
+ if (!tx_task || !mbox || !mbox->tx) {
+ MBOX_PR_ERR("invalid parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mdev = mbox->tx;
+
+ START_TTS(tx_task);
+
+ /* ASYNC_ENQUEUE start */
+ ret = set_status(mdev, MDEV_ASYNC_ENQUEUE);
+ if (ret) {
+ MBOX_PR_ERR("MSG{0x%08x, 0x%08x}\n", tx_task->tx_buffer[0], tx_task->tx_buffer[1]);
+ goto out;
+ }
+ hardware_board_type = mdev->ops->read_board_type(mdev);
+ fifo_size = mdev->ops->get_fifo_size(mdev);
+ /* enqueue */
+ spin_lock_irqsave(&mdev->fifo_lock, flags);
+ if (kfifo_avail(&mdev->fifo) < TX_FIFO_CELL_SIZE) {
+ spin_unlock_irqrestore(&mdev->fifo_lock, flags);
+ ret = -ENOMEM;
+ goto clearstatus;
+ }
+ /* BUG_ON if tx_thread is blocked for some other reason on UDP & FPGA boards */
+ if ((IPC_DEFAULT_BOARD_TYPE != hardware_board_type) && (MAILBOX_MAX_TX_FIFO == fifo_size)) {
+ if (kfifo_avail(&mdev->fifo) < TX_THREAD_BUFFER_WARN_LEVEL) {
+ MBOX_PR_ERR("IPC send tx_thread is blocked by some unknown reason");
+ BUG_ON(1);
+ }
+ }
+
+ kfifo_in(&mdev->fifo, &tx_task, TX_FIFO_CELL_SIZE);
+
+ spin_unlock_irqrestore(&mdev->fifo_lock, flags);
+
+ wake_up_interruptible(&mdev->tx_wait);
+
+clearstatus:
+ /* ASYNC_ENQUEUE end */
+ clr_status(mdev, MDEV_ASYNC_ENQUEUE);
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_mbox_msg_send_async);
+
+static struct hisi_mbox_task *hisi_mbox_dequeue_task(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_task *tx_task = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->fifo_lock, flags);
+ if (kfifo_len(&mdev->fifo) >= TX_FIFO_CELL_SIZE) {
+ if (!kfifo_out(&mdev->fifo, &tx_task, TX_FIFO_CELL_SIZE))
+ tx_task = NULL;
+ }
+
+ spin_unlock_irqrestore(&mdev->fifo_lock, flags);
+ return tx_task;
+}
+
+void hisi_mbox_empty_task(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_task *tx_task = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->fifo_lock, flags);
+ while (kfifo_len(&mdev->fifo) >= TX_FIFO_CELL_SIZE) {
+ if (kfifo_out(&mdev->fifo, &tx_task, TX_FIFO_CELL_SIZE)) {
+ hisi_mbox_task_free(&tx_task);
+ }
+ }
+ spin_unlock_irqrestore(&mdev->fifo_lock, flags);
+
+ mutex_lock(&mdev->dev_lock);
+ /* do nothing here; just wait for any already-dequeued tx_task to finish */
+ mutex_unlock(&mdev->dev_lock);
+}
+
+EXPORT_SYMBOL(hisi_mbox_empty_task);
+
+static int hisi_mbox_tx_thread(void *context)
+{
+ struct hisi_mbox_device *mdev = (struct hisi_mbox_device *)context;
+ struct hisi_mbox_task *tx_task = NULL;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(mdev->tx_wait, (kfifo_len(&mdev->fifo) >= TX_FIFO_CELL_SIZE));
+ if (unlikely(ret)) {
+ printk("hisi_mbox_tx_thread wait event failed\n");
+ continue;
+ }
+
+ mutex_lock(&mdev->dev_lock);
+ /* pull the queued async send requests out of mdev's kfifo one by one and send them */
+ while ((tx_task = hisi_mbox_dequeue_task(mdev))) {
+ ret = hisi_mbox_task_send_async(mdev, tx_task);
+ PRINT_TTS(tx_task);
+ hisi_mbox_task_free(&tx_task);
+ /* current task unlinked */
+ mdev->tx_task = NULL;
+ }
+
+ mutex_unlock(&mdev->dev_lock);
+ }
+ return 0;
+}
+
+static void hisi_mbox_rx_bh(unsigned long context)
+{
+ struct hisi_mbox_device *mdev = (struct hisi_mbox_device *)context;
+ mbox_msg_t *rx_buffer = NULL;
+ mbox_msg_len_t rx_len = 0;
+ unsigned long flags;
+
+ MBOX_PR_DEBUG("mdev %s rx enter\n", mdev->name);
+
+ /*
+ * check msg type
+ * - if an ack interrupt occurs,
+ * an entire ipc has completed, and the completion should be signalled;
+ * - if a msg interrupt occurs,
+ * we broadcast the msgs to users only after submitting an ack.
+ */
+ switch (mdev->cur_task) {
+ case TX_TASK:
+ if (unlikely(NULL == mdev->tx_task)) {
+ /* The tasklet was blocked after being scheduled; on the TASKLET-jam
+ * timeout the tx_task has already been set to NULL, so when the
+ * tasklet finally gets its turn, abort here.
+ */
+ mdev->ops->enable_irq(mdev);
+ return;
+ }
+ rx_len = mdev->ops->recv(mdev, &rx_buffer);
+ spin_lock_irqsave(&mdev->complete_lock, flags);
+ /* When a TASKLET jam occurs, rx_bh may be scheduled before tx_task is
+ * set to NULL, so the (NULL == mdev->tx_task) check above can pass anyway;
+ * re-check the completion state under the lock.
+ */
+ if (unlikely(COMPLETED == mdev->completed)) {
+ spin_unlock_irqrestore(&mdev->complete_lock, flags);
+ return;
+ }
+
+ BH_TTS(mdev->tx_task);
+ mdev->tx_task->ack_buffer = rx_buffer;
+ mdev->tx_task->ack_buffer_len = rx_len;
+ mdev->completed = COMPLETED;
+ complete(&mdev->complete);
+ spin_unlock_irqrestore(&mdev->complete_lock, flags);
+ break;
+
+ case RX_TASK:
+ rx_len = mdev->ops->recv(mdev, &rx_buffer);
+ atomic_notifier_call_chain(&mdev->notifier, rx_len, (void *)rx_buffer);
+ mdev->ops->ack(mdev, NULL, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ mdev->ops->enable_irq(mdev);
+ MBOX_PR_DEBUG("mdev %s rx leave\n", mdev->name);
+
+ return;
+}
+
+static int hisi_mbox_interrupt(int irq, void *p)
+{
+ struct hisi_mbox_device *mdev = (struct hisi_mbox_device *)p;
+ mdev = mdev->ops->irq_to_mdev(mdev, &mdevices, irq);
+ if (!mdev) {
+ MBOX_PR_ERR("an unexpected inttrupt %d occured\n", irq);
+ return IRQ_NONE;
+ }
+
+ if (!mdev->configured) {
+ MBOX_PR_ERR("mdev %s has not startup yet\n", mdev->name);
+ return IRQ_NONE;
+ }
+
+ /* ipc */
+ if (mdev->ops->is_stm(mdev, DESTINATION_STATUS)) {
+ MBOX_PR_DEBUG("mdev %s ipc\n", mdev->name);
+ mdev->cur_task = RX_TASK;
+ mdev->cur_irq = irq;
+ mdev->ops->disable_irq(mdev);
+ tasklet_schedule(&mdev->rx_bh);
+ return IRQ_HANDLED;
+ }
+
+ /* ack */
+ spin_lock(&mdev->complete_lock);
+ if (mdev->tx_task && mdev->ops->is_stm(mdev, ACK_STATUS)) {
+ RECEIVE_TTS(mdev->tx_task);
+
+ if (unlikely(COMPLETED == mdev->completed)) {
+ spin_unlock(&mdev->complete_lock);
+ /* the ack must be cleared if it arrives after the 300ms timeout, otherwise it would keep triggering */
+ mdev->ops->clr_ack(mdev);
+ MBOX_PR_ERR("%s(%d) has been handled" "caused of current IPC timeout(ISR).\n", mdev->name, irq);
+ return IRQ_HANDLED;
+ } else {
+ MBOX_PR_DEBUG("mdev %s ack\n", mdev->name);
+ mdev->completed = COMPLETING;
+ mdev->cur_task = TX_TASK;
+ mdev->cur_irq = irq;
+ mdev->ops->disable_irq(mdev);
+ tasklet_schedule(&mdev->rx_bh);
+ }
+ }
+ spin_unlock(&mdev->complete_lock);
+
+ MBOX_PR_DEBUG("mdev %s interrupt leave, irq %d\n", mdev->name, irq);
+ return IRQ_HANDLED;
+}
+
+static void hisi_mbox_shutdown(struct hisi_mbox_device *mdev, mbox_mail_type_t mail_type)
+{
+ mutex_lock(&mdev->dev_lock);
+ if (!--mdev->configured) {
+ MBOX_PR_DEBUG("%s, %s shutdown.\n", MODULE_NAME, mdev->name);
+
+ mdev->ops->free_irq(mdev, (void *)mdev);
+ mdev->ops->shutdown(mdev);
+
+ switch (mail_type) {
+ case TX_MAIL:
+ kthread_stop(mdev->tx_kthread);
+ kfifo_free(&mdev->fifo);
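+ /* fall through: a tx mdev also owns the rx tasklet */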
+ case RX_MAIL:
+ tasklet_kill(&mdev->rx_bh);
+ break;
+ default:
+ break;
+ }
+ }
+
+ mutex_unlock(&mdev->dev_lock);
+ return;
+}
+
+static void hisi_mbox_free(struct hisi_mbox **mbox)
+{
+ struct hisi_mbox *_mbox = *mbox;
+
+ kfree(_mbox);
+ _mbox = NULL;
+
+ *mbox = _mbox;
+ return;
+}
+
+void hisi_mbox_put(struct hisi_mbox **mbox)
+{
+ struct hisi_mbox *_mbox = NULL;
+ struct hisi_mbox_device *mdev[MAIL_TYPE_MAX] = { NULL };
+ int i;
+
+ if (!mbox) {
+ MBOX_PR_ERR("null pointer\n");
+ return;
+ }
+
+ _mbox = *mbox;
+ if (!_mbox) {
+ MBOX_PR_ERR("null pointer\n");
+ return;
+ }
+
+ /* tx & rx mailbox devices deinit */
+ mdev[TX_MAIL] = _mbox->tx;
+ mdev[RX_MAIL] = _mbox->rx;
+ for (i = TX_MAIL; i < MAIL_TYPE_MAX; i++) {
+ if (mdev[i])
+ hisi_mbox_shutdown(mdev[i], i);
+ }
+
+ if (mdev[RX_MAIL] && _mbox->nb) {
+ atomic_notifier_chain_unregister(&mdev[RX_MAIL]->notifier, _mbox->nb);
+ }
+
+ hisi_mbox_free(mbox);
+ return;
+}
+
+EXPORT_SYMBOL(hisi_mbox_put);
+
+static int hisi_mbox_startup(struct hisi_mbox_device *mdev, mbox_mail_type_t mail_type)
+{
+ int ret = 0;
+ unsigned int tx_buff = 0;
+ unsigned int sched_policy = 0;
+ mutex_lock(&mdev->dev_lock);
+ if (!mdev->configured++) {
+ switch (mail_type) {
+ case TX_MAIL:
+ tx_buff = mdev->ops->get_fifo_size(mdev) * TX_FIFO_CELL_SIZE;
+ MBOX_PR_DEBUG("tx mdev fifo_size is %d\n", tx_buff);
+ if (kfifo_alloc(&mdev->fifo, tx_buff, GFP_KERNEL)) {
+ MBOX_PR_ERR("tx mdev %s alloc kfifo failed\n", mdev->name);
+ ret = -ENOMEM;
+ goto deconfig;
+ }
+ init_waitqueue_head(&mdev->tx_wait);
+ /*create the async tx thread */
+ mdev->tx_kthread = kthread_create(hisi_mbox_tx_thread, (void *)mdev, "%s", mdev->name);
+ if (unlikely(IS_ERR(mdev->tx_kthread))) {
+ MBOX_PR_ERR("create kthread tx_kthread failed!\n");
+ ret = -EINVAL;
+ kfifo_free(&mdev->fifo);
+ goto deconfig;
+ } else {
+ /*
+ * mailbox-13 needs its sched_priority raised to the maximum (99)
+ * and its policy set to SCHED_FIFO, to avoid scheduling starvation.
+ */
+ struct sched_param param;
+ param.sched_priority = (MAX_RT_PRIO - mdev->ops->get_sched_priority(mdev));
+ sched_policy = mdev->ops->get_sched_policy(mdev);
+ MBOX_PR_DEBUG("tx mdev sched_priority is %d \tsched_policy is %d\n",
+ param.sched_priority, sched_policy);
+ (void)sched_setscheduler(mdev->tx_kthread, sched_policy, &param);
+ wake_up_process(mdev->tx_kthread);
+ }
+ /* tx mdev owns rx tasklet as well, for ipc ack msg. */
+ case RX_MAIL:
+ tasklet_init(&mdev->rx_bh, hisi_mbox_rx_bh, (unsigned long)mdev);
+ break;
+ default:
+ ret = -EINVAL;
+ goto deconfig;
+ }
+
+ ret = mdev->ops->startup(mdev);
+ if (ret) {
+ MBOX_PR_ERR("mdev %s startup failed\n", mdev->name);
+ ret = -ENODEV;
+ goto deinit_work;
+ }
+
+ ret = mdev->ops->request_irq(mdev, (irq_handler_t) hisi_mbox_interrupt, (void *)mdev);
+ if (ret) {
+ MBOX_PR_ERR("mdev %s request irq failed\n", mdev->name);
+ ret = -ENODEV;
+ goto shutdown;
+ }
+ }
+
+ mutex_unlock(&mdev->dev_lock);
+ return ret;
+
+shutdown:
+ mdev->ops->shutdown(mdev);
+deinit_work:
+ switch (mail_type) {
+ case TX_MAIL:
+ /*flush_work(&mdev->tx_work);*/
+ kfifo_free(&mdev->fifo);
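+ /* fall through: the tx mdev's rx tasklet must be killed too */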
+ case RX_MAIL:
+ tasklet_kill(&mdev->rx_bh);
+ break;
+ default:
+ break;
+ }
+deconfig:
+ mdev->configured--;
+ mutex_unlock(&mdev->dev_lock);
+ return ret;
+}
+
+static struct hisi_mbox *hisi_mbox_alloc(struct hisi_mbox_device *tx_mdev, struct hisi_mbox_device *rx_mdev, int mdev_index)
+{
+ struct hisi_mbox *mbox = NULL;
+ mbox = kmalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox) {
+ MBOX_PR_ERR("no memory for mbox mailbox<%d>\n", mdev_index);
+ goto out;
+ }
+
+ mbox->mdev_index = mdev_index;
+ mbox->tx = tx_mdev;
+ mbox->rx = rx_mdev;
+ mbox->nb = NULL;
+
+out:
+ return mbox;
+}
+
+/*
+ * Search the list for an mdev that can access the remote processor
+ * identified by mdev_index.
+ */
+static struct hisi_mbox_device *hisi_mbox_device_get(struct list_head *list, mbox_mail_type_t mtype, int mdev_index)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ struct hisi_mbox_device *_mdev = NULL;
+
+ list_for_each_entry(_mdev, list, node) {
+ /*
+ * Although a mailbox's remote processor may still be uncertain
+ * while the mailbox is being prepared for use, it must be neither
+ * uncertain nor inaccessible by the time a tx task is allocated
+ * for that mailbox.
+ *
+ * This rule also accommodates mailboxes whose remote processor
+ * cannot be assigned until the mailbox is actually used in an ipc.
+ */
+ if (_mdev->ops->check(_mdev, mtype, mdev_index) != RPUNACCESSIBLE) {
+ mdev = _mdev;
+ break;
+ }
+ }
+
+ return mdev;
+}
+
+struct hisi_mbox *hisi_mbox_get(int mdev_index, struct notifier_block *nb)
+{
+ struct hisi_mbox *mbox = NULL;
+ struct list_head *list = &mdevices;
+ struct hisi_mbox_device *mdev[MAIL_TYPE_MAX] = { NULL };
+ int i;
+
+ if (list_empty(list)) {
+ MBOX_PR_ERR("mailboxes not ready\n");
+ goto out;
+ }
+
+ mdev[TX_MAIL] = hisi_mbox_device_get(list, TX_MAIL, mdev_index);
+ mdev[RX_MAIL] = hisi_mbox_device_get(list, RX_MAIL, mdev_index);
+ if ((!mdev[TX_MAIL]) && (!mdev[RX_MAIL])) {
+ /*MBOX_PR_ERR("neither tx nor rx mboxes were gotten, may get later\n");*/
+ goto out;
+ }
+
+ for (i = TX_MAIL; i < MAIL_TYPE_MAX; i++) {
+ if ((RX_MAIL == i) && mdev[i] && nb)
+ atomic_notifier_chain_register(&mdev[i]->notifier, nb);
+
+ if (mdev[i] && hisi_mbox_startup(mdev[i], i)) {
+ MBOX_PR_ERR("%s mdev %s startup failed\n", ((i == TX_MAIL) ? "tx" : "rx"), mdev[i]->name);
+ goto shutdown;
+ }
+ }
+
+ mbox = hisi_mbox_alloc(mdev[TX_MAIL], mdev[RX_MAIL], mdev_index);
+ if (!mbox) {
+ MBOX_PR_ERR("failed to alloc mbox\n");
+ goto shutdown;
+ }
+ mbox->nb = nb;
+ MBOX_DEBUG_OFF(mbox);
+
+ return mbox;
+
+shutdown:
+ if ((RX_MAIL == i) && nb)
+ atomic_notifier_chain_unregister(&mdev[i]->notifier, nb);
+ while (i--) {
+ if (mdev[i])
+ hisi_mbox_shutdown(mdev[i], i);
+ }
+out:
+ return mbox;
+}
+
+EXPORT_SYMBOL(hisi_mbox_get);
+
+void hisi_mbox_device_deactivate(struct hisi_mbox_device **list)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ struct hisi_mbox_task *tx_task = NULL;
+ int i, ret;
+
+ for (i = 0; (mdev = list[i]); i++) {
+ (void)set_status(mdev, MDEV_DEACTIVATED);
+
+ mutex_lock(&mdev->dev_lock);
+ if (!mdev->configured) {
+ mutex_unlock(&mdev->dev_lock);
+ continue;
+ }
+ mutex_unlock(&mdev->dev_lock);
+
+ /* flush tx work & tx task list synchronously */
+ /*flush_work(&mdev->tx_work);*/
+
+ mutex_lock(&mdev->dev_lock);
+ while ((tx_task = hisi_mbox_dequeue_task(mdev))) {
+ ret = hisi_mbox_task_send_sync(mdev, tx_task);
+ /* current task unlinked */
+ mdev->tx_task = NULL;
+ }
+ mutex_unlock(&mdev->dev_lock);
+ }
+
+ return;
+}
+
+EXPORT_SYMBOL(hisi_mbox_device_deactivate);
+
+void hisi_mbox_device_activate(struct hisi_mbox_device **list)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ int i;
+
+ for (i = 0; (mdev = list[i]); i++)
+ clr_status(mdev, MDEV_DEACTIVATED);
+
+ return;
+}
+
+EXPORT_SYMBOL(hisi_mbox_device_activate);
+
+static struct class *hisi_mbox_class;
+
+int hisi_mbox_device_unregister(struct hisi_mbox_device **list)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ int i;
+
+ if (!list) {
+ MBOX_PR_ERR("no mboxes registered\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; (mdev = list[i]); i++) {
+ mutex_destroy(&mdev->dev_lock);
+ device_destroy(hisi_mbox_class, (dev_t) i);
+ list_del(&mdev->node);
+ }
+
+ list = NULL;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(hisi_mbox_device_unregister);
+
+static int hisi_mbox_device_is_valid(struct hisi_mbox_device *mdev)
+{
+ if (WARN_ON(!mdev->ops->startup || !mdev->ops->shutdown ||
+             !mdev->ops->check || !mdev->ops->recv ||
+             !mdev->ops->send || !mdev->ops->refresh ||
+             !mdev->ops->request_irq || !mdev->ops->free_irq ||
+             !mdev->ops->irq_to_mdev || !mdev->ops->is_stm))
+ return 0;
+
+ return 1;
+}
+
+int hisi_mbox_device_register(struct device *parent, struct hisi_mbox_device **list)
+{
+ struct hisi_mbox_device *mdev = NULL;
+ int ret, i;
+
+ if (!list) {
+ MBOX_PR_ERR("null pointer\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; (mdev = list[i]); i++) {
+ if (!hisi_mbox_device_is_valid(mdev)) {
+ MBOX_PR_ERR("invalid mdev\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mdev->dev = device_create(hisi_mbox_class, parent, (dev_t) i, mdev, "%s", mdev->name);
+ if (IS_ERR(mdev->dev)) {
+ MBOX_PR_ERR("mdev %s can not create device\n", mdev->name);
+ ret = PTR_ERR(mdev->dev);
+ goto err_out;
+ }
+
+ spin_lock_init(&mdev->fifo_lock);
+ mdev->status = 0;
+ spin_lock_init(&mdev->status_lock);
+ spin_lock_init(&mdev->complete_lock);
+ mutex_init(&mdev->dev_lock);
+
+ init_completion(&mdev->complete);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&mdev->notifier);
+ list_add_tail(&mdev->node, &mdevices);
+ }
+
+ return 0;
+
+err_out:
+ while (i--) {
+ mdev = list[i];
+ list_del(&mdev->node);
+ mutex_destroy(&mdev->dev_lock);
+ device_destroy(hisi_mbox_class, (dev_t) i);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_mbox_device_register);
+
+static int __init hisi_mbox_init(void)
+{
+ pr_debug("%s: init\n", MODULE_NAME);
+
+ hisi_mbox_class = class_create(THIS_MODULE, "hisi-mailbox");
+ if (IS_ERR(hisi_mbox_class))
+ return PTR_ERR(hisi_mbox_class);
+
+ spin_lock_init(&g_task_buffer_lock);
+ return 0;
+}
+
+postcore_initcall(hisi_mbox_init);
+
+static void __exit hisi_mbox_exit(void)
+{
+ if (hisi_mbox_class)
+ class_destroy(hisi_mbox_class);
+
+ return;
+}
+
+module_exit(hisi_mbox_exit);
+
+MODULE_DESCRIPTION("HS mailbox interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox_dev.c b/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox_dev.c
new file mode 100644
index 000000000000..3ae6a60b584e
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/hisi_mailbox_dev.c
@@ -0,0 +1,1304 @@
+/*
+ * hi3xxx mailbox device driver
+ *
+ * Copyright (c) 2013-2014 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hisi/hisi_mailbox.h>
+#include <linux/hisi/hisi_irq_affinity.h>
+#include <linux/kern_levels.h>
+
+#define IPCBITMASK(n) (1 << (n))
+#define IPCMBxSOURCE(mdev) ((mdev) << 6)
+#define IPCMBxDSET(mdev) (((mdev) << 6) + 0x04)
+#define IPCMBxDCLR(mdev) (((mdev) << 6) + 0x08)
+#define IPCMBxDSTATUS(mdev) (((mdev) << 6) + 0x0C)
+#define IPCMBxMODE(mdev) (((mdev) << 6) + 0x10)
+#define IPCMBxIMASK(mdev) (((mdev) << 6) + 0x14)
+#define IPCMBxICLR(mdev) (((mdev) << 6) + 0x18)
+#define IPCMBxSEND(mdev) (((mdev) << 6) + 0x1C)
+#define IPCMBxDATA(mdev, index) (((mdev) << 6) + 0x20 + ((index) << 2))
+#define IPCCPUxIMST(cpu) (((cpu) << 3) + 0x800)
+#define IPCCPUxIRST(cpu) (((cpu) << 3) + 0x804)
+#define IPCLOCK() (0xA00)
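+
+/*
+ * Worked example of the layout encoded above: each mailbox channel owns a
+ * 0x40-byte register window, so for mailbox channel 2, data word 3 lives at
+ * IPCMBxDATA(2, 3) = (2 << 6) + 0x20 + (3 << 2) = 0xAC from the IPC base.
+ */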
+
+#define MODULE_NAME "hisi_mailbox_dev"
+#define FAST_MBOX (1 << 0)
+#define COMM_MBOX (1 << 1)
+#define SOURCE_MBOX (1 << 2)
+#define DESTINATION_MBOX (1 << 3)
+
+#define EVERY_LOOP_TIME_MS 5
+
+#define IPC_UNLOCKED 0x00000000
+#define IPCACKMSG 0x00000000
+#define COMM_MBOX_IRQ (-2)
+#define AUTOMATIC_ACK_CONFIG (1 << 0)
+#define NO_FUNC_CONFIG (0 << 0)
+
+/* Optimize interrupt assignment */
+#define IPC_IRQ_AFFINITY_CPU (1)
+
+#define SYS_RPROC_NUMBER 0x9
+
+#define ISP_RPROC_NUMBER 0x2
+#define STATE_NUMBER 0x4
+
+#define MAILBOX_ASYNC_UDELAY_CNT (1000)
+
+#define ISP_INDEX_BASE 100
+#define DEFAULT_MAILBOX_TIMEOUT 300
+#define DEFAULT_FIFO_SIZE 256
+#define DEFAULT_SCHED_PRIORITY 20
+#define MAILBOX_NO_USED 0
+#define MAX_AP_IPC_INDEX 99
+
+#define MDEV_ERR(fmt, args ...) \
+ ({ \
+ pr_err("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })
+/* MDEV_DEBUG is used only during the project development phase */
+#define MDEV_DEBUG(fmt, args ...)
+/*
+ ({ \
+ pr_debug("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })
+ */
+enum {
+ RX_BUFFER_TYPE = 0,
+ ACK_BUFFER_TYPE,
+ MBOX_BUFFER_TYPE_MAX,
+};
+
+/*
+ * Table of available remote processors. The DTS sub-node property
+ * "remote_processor_type" of the "hisi_mdev" node is configured
+ * according to this table.
+ *
+ * If the table is modified, the DTS configuration must be updated accordingly.
+ */
+typedef enum {
+ GIC = 0,
+ GIC_1 = 0,
+ GIC_2,
+ IOM3,
+ LPM3,
+ HIFI,
+ MCPU,
+ BBE16,
+ IVP32,
+ ISP,
+ UNCERTAIN_REMOTE_PROCESSOR,
+ HI3XXX_RP_TYPES
+} remote_processor_type_t;
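+
+/*
+ * Illustrative DTS sketch only (hypothetical values; the property names follow
+ * what hisi_mdev_get() parses below, and src_bit/des_bit use the
+ * remote_processor_type_t encoding above):
+ *
+ *   mailbox-13 {
+ *       used = <1>;
+ *       index = <13>;
+ *       src_bit = <0>;          (GIC_1, the AP)
+ *       des_bit = <3>;          (LPM3)
+ *       func = <1 1 0>;         (fast, source, not destination)
+ *       timeout = <300>;
+ *       fifo_size = <256>;
+ *       sched_priority = <20>;
+ *       sched_policy = <2>;     (SCHED_RR)
+ *   };
+ */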
+
+
+struct hisi_common_mbox_info {
+ int gic_1_irq_requested;
+ int gic_2_irq_requested;
+ int cmbox_gic_1_irq;
+ int cmbox_gic_2_irq;
+ struct hisi_mbox_device *cmdev;
+};
+
+struct hisi_ipc_device {
+ void __iomem *base;
+ u32 unlock;
+ mbox_msg_t *buf_pool;
+ struct hisi_common_mbox_info *cmbox_info;
+ struct hisi_mbox_device **mdev_res;
+};
+
+struct hisi_mbox_device_priv {
+ u8 func;
+ remote_processor_type_t src;
+ remote_processor_type_t des;
+ int mbox_channel;
+ int irq;
+ int capability;
+ int used;
+ unsigned int timeout;
+ unsigned int fifo_size;
+ unsigned int sched_priority;
+ unsigned int sched_policy;
+ unsigned int hardware_board_type;
+ struct hisi_ipc_device *idev;
+};
+
+/*
+ **HiIPCV230 assigns each communicating processor a unique bit:
+ **austin:
+ **00000001:A53
+ **00000010:Maia
+ **00000100:IOM7
+ **00001000:LPM3
+ **00010000:ASP
+ **00100000:Modem-A9
+ **01000000:Modem-bbe16
+ **10000000:IVP32
+ **
+ **chicago:
+ **000000001:A53
+ **000000010:Maia
+ **000000100:IOM7
+ **000001000:LPM3
+ **000010000:ASP
+ **000100000:Modem-A9
+ **001000000:Modem-bbe16
+ **010000000:IVP32
+ **100000000:ISP32
+ */
+char *sys_rproc_name[SYS_RPROC_NUMBER] = {
+ "AP_LIT_CLUSTER",
+ "AP_BIG_CLUSTER",
+ "SENSORHUB",
+ "LPMCU",
+ "HIFI",
+ "MODEM",
+ "BBE16",
+ "IVP",
+ "ISP"
+};
+
+/* only used in austin and dallas */
+char *isp_rproc_name[ISP_RPROC_NUMBER] = {
+ "AP_LIT_CLUSTER",
+ "ISP"
+};
+
+/*
+ **HiIPCV230 has a state machine with four states:
+ **4'b0001:IDLE_STATE
+ **4'b0010:SOURCE_STATE
+ **4'b0100:DEST_STATE
+ **4'b1000:ACK_STATE
+ */
+char *ipc_state_name[STATE_NUMBER] = {
+ "%s is idle\n",
+ "%s is occupied\n",
+ "%s may be power off or freeze\n",
+ "%s have no time to handle ACK\n"
+};
+
+enum IPC_STATE_MACHINE {
+ IDLE_STATE,
+ SOURCE_STATE,
+ DEST_STATE,
+ ACK_STATE
+};
+
+extern int hisi_rproc_init(void);
+
+char *rproc_analysis(const char *mdev_name, unsigned int pro_code)
+{
+ unsigned char index = 0;
+ while (pro_code) {
+ index++;
+ pro_code >>= 1;
+ }
+ if (likely(0 != index))
+ index--;
+ else
+ return "ERR_RPROC";
+ /* mailbox channel of the sys IPC */
+ if (NULL == strstr(mdev_name, "isp")) {
+ if (likely(index < SYS_RPROC_NUMBER))
+ return sys_rproc_name[index];
+ else
+ return "ERR_RPROC";
+ } else { /* mailbox channel of the isp IPC */
+ if (likely(index < ISP_RPROC_NUMBER))
+ return isp_rproc_name[index];
+ else
+ return "ERR_RPROC";
+ }
+
+}
+
+char *ipc_state_analysis(unsigned int mode, unsigned char *outp)
+{
+ unsigned char index = 0;
+ mode >>= 4; /* bits 4~7 hold the state machine index */
+ while (mode) {
+ index++;
+ mode >>= 1;
+ }
+ if (likely(0 != index))
+ index--;
+ else
+ return "%s ERR_STATE\n";
+
+ *outp = index;
+
+ if (likely(index < STATE_NUMBER))
+ return ipc_state_name[index];
+ else
+ return "%s ERR_STATE\n";
+}
+
+static inline void __ipc_lock(void __iomem *base, unsigned int lock_key)
+{
+ __raw_writel(lock_key, base + IPCLOCK());
+}
+
+static inline void __ipc_unlock(void __iomem *base, unsigned int key)
+{
+ __raw_writel(key, base + IPCLOCK());
+}
+
+static inline unsigned int __ipc_lock_status(void __iomem *base)
+{
+ return __raw_readl(base + IPCLOCK());
+}
+
+static inline void __ipc_set_src(void __iomem *base, int source, int mdev)
+{
+ __raw_writel(IPCBITMASK(source), base + IPCMBxSOURCE(mdev));
+}
+
+static inline unsigned int __ipc_read_src(void __iomem *base, int mdev)
+{
+ return __raw_readl(base + IPCMBxSOURCE(mdev));
+}
+
+static inline void __ipc_set_des(void __iomem *base, int source, int mdev)
+{
+ __raw_writel(IPCBITMASK(source), base + IPCMBxDSET(mdev));
+}
+
+static inline void __ipc_clr_des(void __iomem *base, int source, int mdev)
+{
+ __raw_writel(IPCBITMASK(source), base + IPCMBxDCLR(mdev));
+}
+
+static inline unsigned int __ipc_des_status(void __iomem *base, int mdev)
+{
+ return __raw_readl(base + IPCMBxDSTATUS(mdev));
+}
+
+static inline void __ipc_send(void __iomem *base, unsigned int tosend, int mdev)
+{
+ __raw_writel(tosend, base + IPCMBxSEND(mdev));
+}
+
+static inline unsigned int __ipc_read(void __iomem *base, int mdev, int index)
+{
+ return __raw_readl(base + IPCMBxDATA(mdev, index));
+}
+
+static inline void __ipc_write(void __iomem *base, u32 data, int mdev, int index)
+{
+ __raw_writel(data, base + IPCMBxDATA(mdev, index));
+}
+
+static inline unsigned int __ipc_cpu_imask_get(void __iomem *base, int mdev)
+{
+ return __raw_readl(base + IPCMBxIMASK(mdev));
+}
+
+static inline void __ipc_cpu_imask_clr(void __iomem *base, unsigned int toclr, int mdev)
+{
+ unsigned int reg;
+
+ reg = __raw_readl(base + IPCMBxIMASK(mdev));
+ reg = reg & (~(toclr));
+
+ __raw_writel(reg, base + IPCMBxIMASK(mdev));
+}
+
+static inline void __ipc_cpu_imask_all(void __iomem *base, int mdev)
+{
+ __raw_writel((~0), base + IPCMBxIMASK(mdev));
+}
+
+static inline void __ipc_cpu_iclr(void __iomem *base, unsigned int toclr, int mdev)
+{
+ __raw_writel(toclr, base + IPCMBxICLR(mdev));
+}
+
+static inline int __ipc_cpu_istatus(void __iomem *base, int mdev)
+{
+ return __raw_readl(base + IPCMBxICLR(mdev));
+}
+
+static inline unsigned int __ipc_mbox_istatus(void __iomem *base, int cpu)
+{
+ return __raw_readl(base + IPCCPUxIMST(cpu));
+}
+
+static inline unsigned int __ipc_mbox_irstatus(void __iomem *base, int cpu)
+{
+ return __raw_readl(base + IPCCPUxIRST(cpu));
+}
+
+static inline unsigned int __ipc_status(void __iomem *base, int mdev)
+{
+ return __raw_readl(base + IPCMBxMODE(mdev));
+}
+
+static inline void __ipc_mode(void __iomem *base, unsigned int mode, int mdev)
+{
+ __raw_writel(mode, base + IPCMBxMODE(mdev));
+}
+
+static int hisi_mdev_startup(struct hisi_mbox_device *mdev)
+{
+ /*
+ * Nothing needs to be done during the suspend & resume flow for
+ * HI3xxx IPC; see the dummy SR functions, hisi_mdev_suspend &
+ * hisi_mdev_resume. Runtime power management handling is reserved
+ * here for further modification, if necessary.
+ */
+ return 0;
+}
+
+static void hisi_mdev_shutdown(struct hisi_mbox_device *mdev)
+{
+ /*
+ * Nothing needs to be done during the suspend & resume flow for
+ * HI3xxx IPC; see the dummy SR functions, hisi_mdev_suspend &
+ * hisi_mdev_resume. Runtime power management handling is reserved
+ * here for further modification, if necessary.
+ */
+ return;
+}
+
+static void hisi_mdev_dump_status(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ /* 64 is the combined maximum length of a sys_rproc_name entry and an ipc_state_name entry */
+ char finalfortmat[64] = { 0 };
+ char statem = 0;
+ char *src_name = rproc_analysis(mdev->name, __ipc_read_src(priv->idev->base, priv->mbox_channel));
+ char *des_name = rproc_analysis(mdev->name, __ipc_des_status(priv->idev->base, priv->mbox_channel));
+ /* "\0013" is KERN_SOH followed by '3', i.e. KERN_ERR */
+ char *direcstr = KERN_ERR "\n<INFO>: [%s]-->[%s], ";
+ char *machinestr = ipc_state_analysis(__ipc_status(priv->idev->base, priv->mbox_channel), &statem);
+
+ memcpy(finalfortmat, direcstr, strlen(direcstr));
+
+ strncat(finalfortmat, machinestr, strlen(machinestr));
+
+ if (DEST_STATE == statem)
+ printk(finalfortmat, src_name, des_name, des_name);
+ else if (ACK_STATE == statem)
+ printk(finalfortmat, src_name, des_name, src_name);
+ else
+ printk(finalfortmat, src_name, des_name, mdev->name);
+
+
+ return;
+}
+
+static int hisi_mdev_check(struct hisi_mbox_device *mdev, mbox_mail_type_t mtype, int mdev_index)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int ret = RPUNACCESSIBLE;
+ int index = priv->mbox_channel;
+ if (NULL != strstr(mdev->name, "isp")) {
+ index = index + ISP_INDEX_BASE;
+ MDEV_DEBUG("isp-index is %d\n", index);
+ }
+ if ((TX_MAIL == mtype) && (SOURCE_MBOX & priv->func) && (index == mdev_index) && (priv->used == 1))
+ ret = RPACCESSIBLE;
+ else if ((RX_MAIL == mtype) && (DESTINATION_MBOX & priv->func) && (index == mdev_index) && (priv->used == 1))
+ ret = RPACCESSIBLE;
+
+ return ret;
+}
+
+static void hisi_mdev_clr_ack(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ unsigned int imask;
+ unsigned int toclr;
+
+ imask = __ipc_cpu_imask_get(priv->idev->base, priv->mbox_channel);
+ toclr = (IPCBITMASK(GIC_1) | IPCBITMASK(GIC_2)) & (~imask);
+ __ipc_cpu_iclr(priv->idev->base, toclr, priv->mbox_channel);
+}
+
+static void hisi_mdev_clr_irq_and_ack(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ unsigned int status = 0;
+ unsigned int imask;
+ unsigned int todo;
+ int i;
+
+ /*
+ * temporarily, local processor will clean msg register,
+ * and ack zero for an ipc from remote processors.
+ */
+ for (i = 0; i < priv->capability; i++)
+ __ipc_write(priv->idev->base, IPCACKMSG, priv->mbox_channel, i);
+
+ imask = __ipc_cpu_imask_get(priv->idev->base, priv->mbox_channel);
+ /* get the unmasked core bits and clear the irq according to them,
+ * because the irq is guaranteed to have been raised towards the unmasked cores
+ */
+ todo = (IPCBITMASK(GIC_1) | IPCBITMASK(GIC_2)) & (~imask);
+ __ipc_cpu_iclr(priv->idev->base, todo, priv->mbox_channel);
+
+ status = __ipc_status(priv->idev->base, priv->mbox_channel);
+
+ if ((DESTINATION_STATUS & status) && (!(AUTOMATIC_ACK_CONFIG & status))) {
+ __ipc_send(priv->idev->base, todo, priv->mbox_channel);
+ }
+
+ return;
+}
+
+static void hisi_mdev_ack(struct hisi_mbox_device *mdev, mbox_msg_t *msg, mbox_msg_len_t len)
+{
+ return;
+}
+
+static mbox_msg_len_t hisi_mdev_hw_read(struct hisi_mbox_device *mdev, mbox_msg_t *msg)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ mbox_msg_len_t cap;
+ int i;
+
+ cap = priv->capability;
+ for (i = 0; i < cap; i++)
+ msg[i] = __ipc_read(priv->idev->base, priv->mbox_channel, i);
+
+ return cap;
+}
+
+/* check which of the IP's four state machine states is active: idle, src, des, ack */
+static int hisi_mdev_is_stm(struct hisi_mbox_device *mdev, unsigned int stm)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int is_stm = 0;
+
+ if ((stm & __ipc_status(priv->idev->base, priv->mbox_channel)))
+ is_stm = 1;
+
+ return is_stm;
+}
+
+static mbox_msg_len_t hisi_mdev_receive_msg(struct hisi_mbox_device *mdev, mbox_msg_t **buf)
+{
+ mbox_msg_t *_buf = NULL;
+ mbox_msg_len_t len = 0;
+
+ if (hisi_mdev_is_stm(mdev, ACK_STATUS))
+ _buf = mdev->ack_buffer;
+ else
+ _buf = mdev->rx_buffer;
+
+ if (_buf)
+ len = hisi_mdev_hw_read(mdev, _buf);
+ *buf = _buf;
+
+ hisi_mdev_clr_irq_and_ack(mdev);
+ return len;
+}
+
+static int hisi_mdev_unlock(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int retry = 3;
+
+ do {
+ __ipc_unlock(priv->idev->base, priv->idev->unlock);
+ if (IPC_UNLOCKED == __ipc_lock_status(priv->idev->base))
+ break;
+
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ if (!retry)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int hisi_mdev_occupy(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int retry = 10;
+
+ do {
+ /*
+ * Hardware lock
+ * A hardware lock is needed here to lock a mailbox resource,
+ * which could be used by another remote processor, such as
+ * a HiIPCV230 common mailbox-25/mailbox-26.
+ */
+ if (!(__ipc_status(priv->idev->base, priv->mbox_channel) & IDLE_STATUS)) {
+ asm volatile ("wfe");
+ } else {
+ /* set the source processor bit; a common mailbox's source processor bit is set through the dtsi */
+ __ipc_set_src(priv->idev->base, priv->src, priv->mbox_channel);
+ if (__ipc_read_src(priv->idev->base, priv->mbox_channel) & IPCBITMASK(priv->src))
+ break;
+ }
+ retry--;
+ /* Hardware unlock */
+ } while (retry);
+
+ if (!retry)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int hisi_mdev_hw_send(struct hisi_mbox_device *mdev, mbox_msg_t *msg, mbox_msg_len_t len, int ack_mode)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int i;
+ unsigned int temp;
+
+ /* interrupts unmask */
+ __ipc_cpu_imask_all(priv->idev->base, priv->mbox_channel);
+
+ if (AUTO_ACK == ack_mode)
+ temp = IPCBITMASK(priv->des);
+ else
+ temp = IPCBITMASK(priv->src) | IPCBITMASK(priv->des);
+
+ __ipc_cpu_imask_clr(priv->idev->base, temp, priv->mbox_channel);
+
+ /* des config */
+
+ __ipc_set_des(priv->idev->base, priv->des, priv->mbox_channel);
+
+ /* ipc mode config */
+ if (AUTO_ACK == ack_mode)
+ temp = AUTOMATIC_ACK_CONFIG;
+ else
+ temp = NO_FUNC_CONFIG;
+
+ __ipc_mode(priv->idev->base, temp, priv->mbox_channel);
+
+ /* write data */
+ for (i = 0; i < ((priv->capability < len) ? priv->capability : len); i++)
+ __ipc_write(priv->idev->base, msg[i], priv->mbox_channel, i);
+
+ /* enable sending */
+ __ipc_send(priv->idev->base, IPCBITMASK(priv->src), priv->mbox_channel);
+ return 0;
+}
+
+static void hisi_mdev_ensure_channel(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = NULL;
+ int timeout = 0;
+ int loop = 0;
+ priv = mdev->priv;
+ loop = priv->timeout / EVERY_LOOP_TIME_MS + MAILBOX_ASYNC_UDELAY_CNT;
+ if (mdev->ops->is_stm(mdev, IDLE_STATUS)) {
+ /*IDLE STATUS, return directly */
+ return;
+ }
+ /* the ACK status has been reached, so just release; the sync and async paths are serialized by mdev->dev_lock */
+ else if (mdev->ops->is_stm(mdev, ACK_STATUS)) {
+ /*ACK STATUS, release the channel directly */
+ goto release;
+ }
+ /*DEST STATUS and SRC STATUS, the dest is processing, wait here */
+ else { /*if(mdev->ops->is_stm(mdev, DESTINATION_STATUS) || mdev->ops->is_stm(mdev, SOURCE_STATUS)) */
+ /*the worst situation is to delay 1000*5us+60*5ms = 305ms */
+ while (timeout < loop) {
+ if (timeout < MAILBOX_ASYNC_UDELAY_CNT) {
+ udelay(5);
+ } else {
+ /* the HIFI may power off while an ipc msg is sent, so waiting for the ACK status may take ~20ms */
+ usleep_range(3000, 5000);
+ /*MDEV_ERR("mdev %s sleep 5ms, timeout = %d\n", mdev->name, timeout); */
+ }
+ /*if the ack status is ready, break out */
+ if (mdev->ops->is_stm(mdev, ACK_STATUS)) {
+ break;
+ }
+ timeout++;
+ }
+
+ if (unlikely(timeout == loop)) {
+ MDEV_ERR("\n %s ipc timeout...\n", mdev->name);
+
+ if (mdev->ops->status)
+ mdev->ops->status(mdev);
+ }
+
+ goto release;
+ }
+
+release:
+ /*release the channel */
+ mdev->ops->refresh(mdev);
+}
+
+static int hisi_mdev_send_msg(struct hisi_mbox_device *mdev, mbox_msg_t *msg, mbox_msg_len_t len, int ack_mode)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int err = 0;
+ /* all mailbox channels are treated as fast mailboxes */
+ if (DESTINATION_MBOX & priv->func) {
+ MDEV_ERR("mdev %s has no tx ability\n", mdev->name);
+ err = -EMDEVCLEAN;
+ goto out;
+ }
+
+ /*
+ * Whenever an ipc starts,
+ * ipc module has to be unlocked at the very beginning.
+ */
+ if (hisi_mdev_unlock(mdev)) {
+ pr_err("%s: mdev %s can not be unlocked\n", MODULE_NAME, mdev->name);
+ err = -EMDEVCLEAN;
+ goto out;
+ }
+
+ if (hisi_mdev_occupy(mdev)) {
+ MDEV_ERR("mdev %s can not be occupied\n", mdev->name);
+ err = -EMDEVCLEAN;
+ goto out;
+ }
+
+ (void)hisi_mdev_hw_send(mdev, msg, len, ack_mode);
+
+out:
+ return err;
+}
+
+static void hisi_mdev_release(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+
+ __ipc_cpu_imask_all(priv->idev->base, priv->mbox_channel);
+ __ipc_set_src(priv->idev->base, priv->src, priv->mbox_channel);
+
+ asm volatile ("sev");
+ return;
+}
+
+static unsigned int hisi_mdev_board_type(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ return priv->hardware_board_type;
+}
+
+static int hisi_mdev_irq_request(struct hisi_mbox_device *mdev, irq_handler_t handler, void *p)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+ int ret = 0;
+
+ if (priv->idev->cmbox_info->cmbox_gic_1_irq == priv->irq) {
+ if (!priv->idev->cmbox_info->gic_1_irq_requested++) {
+ ret = request_irq(priv->irq, handler, 0, mdev->name, (void *)priv->idev->cmbox_info->cmdev);
+ if (ret) {
+ MDEV_ERR("fast source %s request gic_1_irq %d failed\n", mdev->name, priv->irq);
+ priv->idev->cmbox_info->gic_1_irq_requested--;
+ goto out;
+ }
+
+ hisi_irqaffinity_register(priv->irq, IPC_IRQ_AFFINITY_CPU);
+ }
+ } else if (priv->idev->cmbox_info->cmbox_gic_2_irq == priv->irq) {
+ if (!priv->idev->cmbox_info->gic_2_irq_requested++) {
+ ret = request_irq(priv->irq, handler, 0, mdev->name, (void *)priv->idev->cmbox_info->cmdev);
+ if (ret) {
+ MDEV_ERR("fast source %s request gic_2_irq %d failed\n", mdev->name, priv->irq);
+ priv->idev->cmbox_info->gic_2_irq_requested--;
+ goto out;
+ }
+
+ hisi_irqaffinity_register(priv->irq, IPC_IRQ_AFFINITY_CPU);
+ }
+ } else {
+ ret = request_irq(priv->irq, handler, 0, mdev->name, p);
+ if (ret) {
+ MDEV_ERR("fast desitnation %s request irq %d failed\n", mdev->name, priv->irq);
+ goto out;
+ }
+
+ hisi_irqaffinity_register(priv->irq, IPC_IRQ_AFFINITY_CPU);
+ }
+
+out:
+ return ret;
+}
+
+static void hisi_mdev_irq_free(struct hisi_mbox_device *mdev, void *p)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;
+
+ if (priv->idev->cmbox_info->cmbox_gic_1_irq == priv->irq) {
+ if (!--priv->idev->cmbox_info->gic_1_irq_requested)
+ free_irq(priv->irq, (void *)priv->idev->cmbox_info->cmdev);
+ } else if (priv->idev->cmbox_info->cmbox_gic_2_irq == priv->irq) {
+ if (!--priv->idev->cmbox_info->gic_2_irq_requested)
+ free_irq(priv->irq, (void *)priv->idev->cmbox_info->cmdev);
+ } else {
+ free_irq(priv->irq, p);
+ }
+
+ return;
+}
+
+static void hisi_mdev_irq_enable(struct hisi_mbox_device *mdev)
+{
+ enable_irq((unsigned int)mdev->cur_irq);
+}
+
+static void hisi_mdev_irq_disable(struct hisi_mbox_device *mdev)
+{
+ disable_irq_nosync((unsigned int)mdev->cur_irq);
+}
+
+static struct hisi_mbox_device *hisi_mdev_irq_to_mdev(struct hisi_mbox_device *_mdev, struct list_head *list, int irq)
+{
+ struct hisi_mbox_device_priv *_priv = NULL;
+ struct hisi_mbox_device *mdev = NULL;
+ struct hisi_mbox_device_priv *priv = NULL;
+ remote_processor_type_t src = UNCERTAIN_REMOTE_PROCESSOR;
+ unsigned int regval = 0x0;
+
+ if ((list_empty(list)) || (NULL == _mdev)) {
+ MDEV_ERR("invalid input\n");
+ goto out;
+ }
+
+ _priv = _mdev->priv;
+
+ /* fast destination mailboxes use unique irq number */
+ if ((DESTINATION_MBOX & _priv->func) && (FAST_MBOX & _priv->func)) {
+ mdev = _mdev;
+ goto out;
+ }
+
+ /* fast source & common mailboxes share GIC_1 & GIC_2 irq number */
+ if (irq == _priv->idev->cmbox_info->cmbox_gic_1_irq) {
+ src = GIC_1;
+ } else if (irq == _priv->idev->cmbox_info->cmbox_gic_2_irq) {
+ src = GIC_2;
+ } else {
+ MDEV_ERR("odd irq for hisi mailboxes\n");
+ goto out;
+ }
+
+ regval = __ipc_mbox_istatus(_priv->idev->base, src);
+ if (0 == regval) {
+ mdev = NULL;
+ goto out;
+ }
+ list_for_each_entry(mdev, list, node) {
+ priv = mdev->priv;
+
+ if ((regval & IPCBITMASK(priv->mbox_channel)) && (priv->func & SOURCE_MBOX))
+ goto out;
+ }
+
+out:
+ /* normally a matching mdev has been found by this point */
+ return mdev;
+}
+
+static unsigned int hisi_mdev_timeout(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;/*lint !e838 */
+ return priv->timeout;
+}
+
+static unsigned int hisi_mdev_fifo_size(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;/*lint !e838 */
+ return priv->fifo_size;
+}
+
+static unsigned int hisi_mdev_sched_priority(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;/*lint !e838 */
+ return priv->sched_priority;
+}
+
+static unsigned int hisi_mdev_sched_policy(struct hisi_mbox_device *mdev)
+{
+ struct hisi_mbox_device_priv *priv = mdev->priv;/*lint !e838 */
+ return priv->sched_policy;
+}
+
+struct hisi_mbox_dev_ops hisi_mdev_ops = {
+ .startup = hisi_mdev_startup,
+ .shutdown = hisi_mdev_shutdown,
+ .check = hisi_mdev_check,
+ .recv = hisi_mdev_receive_msg,
+ .send = hisi_mdev_send_msg,
+ .ack = hisi_mdev_ack,
+ .refresh = hisi_mdev_release,
+
+ .get_timeout = hisi_mdev_timeout,
+ .get_fifo_size = hisi_mdev_fifo_size,
+ .get_sched_priority = hisi_mdev_sched_priority,
+ .get_sched_policy = hisi_mdev_sched_policy,
+ .read_board_type = hisi_mdev_board_type,
+ .request_irq = hisi_mdev_irq_request,
+ .free_irq = hisi_mdev_irq_free,
+ .enable_irq = hisi_mdev_irq_enable,
+ .disable_irq = hisi_mdev_irq_disable,
+ .irq_to_mdev = hisi_mdev_irq_to_mdev,
+ .is_stm = hisi_mdev_is_stm,
+ .clr_ack = hisi_mdev_clr_ack,
+ .ensure_channel = hisi_mdev_ensure_channel,
+ .status = hisi_mdev_dump_status,
+};
+
+static void hisi_mdev_put(struct hisi_ipc_device *idev)
+{
+ struct hisi_mbox_device **list = idev->mdev_res;
+ struct hisi_mbox_device *mdev = NULL;
+ int i;
+
+ iounmap(idev->base);
+
+ kfree(idev->cmbox_info);
+ kfree(idev->buf_pool);
+
+ for (i = 0; (mdev = list[i]); i++) {
+ kfree(mdev->priv);
+ kfree(mdev);
+ }
+
+ return;
+}
+
+static int hisi_mdev_remove(struct platform_device *pdev)
+{
+ struct hisi_ipc_device *idev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (idev) {
+ hisi_mbox_device_unregister(idev->mdev_res);
+ hisi_mdev_put(idev);
+ kfree(idev);
+ }
+
+ return 0;
+}
+
+static int hisi_mdev_get(struct hisi_ipc_device *idev, struct hisi_mbox_device **mdevs, struct device_node *node)
+{
+ struct device_node *son = NULL;
+ struct hisi_common_mbox_info *cmbox_info = NULL;
+ struct hisi_mbox_device *mdev;
+ struct hisi_mbox_device_priv *priv;
+ remote_processor_type_t src_bit;
+ remote_processor_type_t des_bit;
+ mbox_msg_t *buf_pool = NULL;
+ mbox_msg_len_t buf_pool_len = 0;
+ const char *mdev_name = NULL;
+ mbox_msg_t *rx_buffer = NULL;
+ mbox_msg_t *ack_buffer = NULL;
+ u8 func = 0;
+ u32 output[3] = { 0 };
+ int irq = 0;
+ int i = 0;
+ int mbox_channel;
+ int mdev_index_temp = 0;/* some mailbox is not used */
+ int ret = 0;
+ unsigned int used = 0;
+ int cm_gic_1_irq = -1;
+ int cm_gic_2_irq = -1;
+ int capability = 0;
+ unsigned int hardware_board_type = 0;
+ u32 unlock = 0;
+ int mdev_num = 0;
+ unsigned int timeout;
+ unsigned int fifo_size;
+ unsigned int sched_priority;
+ unsigned int sched_policy;
+ void __iomem *ipc_base = NULL;
+ ipc_base = of_iomap(node, 0);
+ if (!ipc_base) {
+ MDEV_ERR("iomap error\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ MDEV_DEBUG("ipc_base: 0x%lx\n", (unsigned long)ipc_base);
+
+ ret = of_property_read_u32(node, "capability", &capability);
+ if (ret) {
+ MDEV_ERR("prop \"capability\" error %d\n", ret);
+ ret = -ENODEV;
+ goto to_iounmap;
+ }
+
+ MDEV_DEBUG("capability: %d\n", (int)capability);
+
+ ret = of_property_read_u32(node, "hardware_board_type", &hardware_board_type);
+ if (ret) {
+ MDEV_DEBUG("hardware_board_type: %d, it's not UDP & FPGA\n", (int)hardware_board_type);
+ hardware_board_type = IPC_DEFAULT_BOARD_TYPE;
+ }
+
+ MDEV_DEBUG("hardware_board_type: %d\n", (int)hardware_board_type);
+
+ ret = of_property_read_u32(node, "unlock_key", &unlock);
+ if (ret) {
+ MDEV_ERR("prop \"key\" error %d\n", ret);
+ ret = -ENODEV;
+ goto to_iounmap;
+ }
+
+ MDEV_DEBUG("unlock_key: 0x%x\n", (unsigned int)unlock);
+ ret = of_property_read_u32(node, "mailboxes", &mdev_num);
+ if (ret) {
+ pr_err("%s: prop \"mailboxes\" error %d\n", MODULE_NAME, ret);
+ ret = -ENODEV;
+ goto to_iounmap;
+ }
+
+ MDEV_DEBUG("mailboxes: %d\n", (int)mdev_num);
+ cmbox_info = kmalloc(sizeof(*cmbox_info), GFP_KERNEL);
+ if (!cmbox_info) {
+ ret = -ENOMEM;
+ goto to_iounmap;
+ }
+
+ buf_pool_len = capability * MBOX_BUFFER_TYPE_MAX * mdev_num;
+ buf_pool = kzalloc(sizeof(mbox_msg_t) * buf_pool_len, GFP_KERNEL);
+ if (!buf_pool) {
+ ret = -ENOMEM;
+ goto free_cmbox;
+ }
+
+ MDEV_DEBUG("buffer pool: 0x%lx\n", (unsigned long)buf_pool);
+
+ cm_gic_1_irq = irq_of_parse_and_map(node, 0);
+ cm_gic_2_irq = irq_of_parse_and_map(node, 1);
+
+ cmbox_info->gic_1_irq_requested = 0;
+ cmbox_info->gic_2_irq_requested = 0;
+ cmbox_info->cmbox_gic_1_irq = cm_gic_1_irq;
+ cmbox_info->cmbox_gic_2_irq = cm_gic_2_irq;
+ cmbox_info->cmdev = NULL;
+
+ idev->cmbox_info = cmbox_info;
+ idev->unlock = unlock;
+ idev->base = ipc_base;
+ idev->mdev_res = mdevs;
+ idev->buf_pool = buf_pool;
+
+ for (i = 0; (son = of_get_next_child(node, son)); i++) {
+ mdev = NULL;
+ priv = NULL;
+ mdev_name = NULL;
+ func = 0;
+ mbox_channel = -1;
+ rx_buffer = NULL;
+ ack_buffer = NULL;
+ used = 0;
+
+ ret = of_property_read_u32(son, "used", &used);
+ if (ret) {
+ MDEV_ERR("mailbox-%d has no tag <used>\n", mdev_index_temp);
+ goto to_break;
+ }
+ if (MAILBOX_NO_USED == used) {
+ MDEV_DEBUG("mailbox node %s is not used\n", son->name);
+ continue;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev) {
+ ret = -ENOMEM;
+ goto to_break;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto free_mdev;
+ }
+
+ mdev_name = son->name;
+
+ MDEV_DEBUG("mailbox node: %s\n", mdev_name);
+
+ ret = of_property_read_u32(son, "src_bit", &src_bit);
+ if (ret)
+ goto free_priv;
+
+ ret = of_property_read_u32(son, "des_bit", &des_bit);
+ if (ret)
+ goto free_priv;
+ /* read the software index into mbox_channel and derive the real channel number */
+ ret = of_property_read_u32(son, "index", &mbox_channel);
+ if (ret)
+ goto free_priv;
+
+ MDEV_DEBUG("index: %d\n", (int)mbox_channel);
+ /* distinguish between the different IPC blocks and calculate the true mailbox index */
+ if (MAX_AP_IPC_INDEX < mbox_channel)
+ mbox_channel = mbox_channel % 100;
+
+ ret = of_property_read_u32(son, "timeout", &timeout);
+ if (ret || 0 != timeout % EVERY_LOOP_TIME_MS)
+ timeout = DEFAULT_MAILBOX_TIMEOUT;
+
+ MDEV_DEBUG("timeout: %d\n", (int)timeout);
+
+ ret = of_property_read_u32(son, "fifo_size", &fifo_size);
+ if (ret)
+ fifo_size = DEFAULT_FIFO_SIZE;
+
+ MDEV_DEBUG("fifo_size: %d\n", (int)fifo_size);
+
+ ret = of_property_read_u32(son, "sched_priority", &sched_priority);
+ if (ret)
+ sched_priority = DEFAULT_SCHED_PRIORITY;
+
+ MDEV_DEBUG("sched_priority: %d\n", (int)sched_priority);
+
+ ret = of_property_read_u32(son, "sched_policy", &sched_policy);
+ if (ret)
+ sched_policy = SCHED_RR;/* default sched_policy is SCHED_RR */
+
+ MDEV_DEBUG("sched_policy: %d\n", (int)sched_policy);
+
+ ret = of_property_read_u32_array(son, "func", output, 3);
+ if (ret)
+ goto free_priv;
+
+ func |= (output[0] ? FAST_MBOX : COMM_MBOX);
+
+ func |= (output[1] ? SOURCE_MBOX : 0);
+
+ func |= (output[2] ? DESTINATION_MBOX : 0);
+
+ if ((FAST_MBOX & func) && (DESTINATION_MBOX & func)) {
+ MDEV_DEBUG("func FAST DES MBOX\n");
+ irq = irq_of_parse_and_map(son, 0);
+ MDEV_DEBUG("irq: %d\n", (int)irq);
+ } else if ((FAST_MBOX & func) && (SOURCE_MBOX & func)) {
+ MDEV_DEBUG("func FAST SRC MBOX\n");
+ irq = (GIC_1 == src_bit) ? cm_gic_1_irq : cm_gic_2_irq;
+ MDEV_DEBUG("irq: %d\n", (int)irq);
+ /* set the cmdev; it will be used when handling the ACPU's interrupts */
+ if (NULL == cmbox_info->cmdev)
+ cmbox_info->cmdev = mdev;
+ } else {
+ /* maybe GIC_1 OR GIC_2 */
+ MDEV_DEBUG(" xxxxxxxxx we don't use comm-mailbox , we use it as fast-mailbox\n");
+ /*we don't use comm-mailbox , we use it as fast-mailbox, please set the comm to fast in the dtsi */
+ irq = COMM_MBOX_IRQ;
+ cmbox_info->cmdev = mdev;
+ MDEV_DEBUG("irq: %d\n", (int)irq);
+ }
+
+ rx_buffer = buf_pool + capability * RX_BUFFER_TYPE;
+ ack_buffer = buf_pool + capability * ACK_BUFFER_TYPE;
+ buf_pool = buf_pool + capability * MBOX_BUFFER_TYPE_MAX;
+ MDEV_DEBUG("rx_buffer: 0x%lx\nack_buffer: 0x%lx\n", (unsigned long)rx_buffer, (unsigned long)ack_buffer);
+
+ priv->capability = capability;
+ priv->hardware_board_type = hardware_board_type;
+ priv->func = func;
+ priv->src = src_bit;
+ priv->des = des_bit;
+ priv->irq = irq;
+ priv->mbox_channel = mbox_channel;
+ priv->idev = idev;
+ priv->used = used;
+ priv->timeout = timeout;
+ priv->fifo_size = fifo_size;
+ priv->sched_priority = sched_priority;
+ priv->sched_policy = sched_policy;
+
+ mdev->name = mdev_name;
+ mdev->priv = priv;
+ mdev->rx_buffer = rx_buffer;
+ mdev->ack_buffer = ack_buffer;
+ mdev->ops = &hisi_mdev_ops;
+
+ mdevs[mdev_index_temp] = mdev;
+ mdev_index_temp++;
+ continue;
+free_priv:
+ kfree(priv);
+free_mdev:
+ kfree(mdev);
+to_break:
+ break;
+ }
+
+ if (ret)
+ goto deinit_mdevs;
+
+ return ret;
+
+deinit_mdevs:
+ while (i--) {
+ kfree(mdevs[i]->priv);
+ kfree(mdevs[i]);
+ }
+
+ kfree(idev->buf_pool);
+free_cmbox:
+ kfree(cmbox_info);
+to_iounmap:
+ iounmap(ipc_base);
+out:
+ return ret;
+}
+
+static int hisi_mdev_probe(struct platform_device *pdev)
+{
+ struct hisi_ipc_device *idev = NULL;
+ struct hisi_mbox_device **mdev_res = NULL;
+ struct device_node *node = pdev->dev.of_node;
+ int mdev_num = 0;
+ int ret = 0;
+
+ if (!node) {
+ MDEV_ERR("dts[%s] node not found\n", "hisilicon,HiIPCV230");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ idev = kmalloc(sizeof(*idev), GFP_KERNEL);
+ if (!idev) {
+ MDEV_ERR("no mem for ipc resouce\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32(node, "mailboxes", &mdev_num);
+ if (ret) {
+ MDEV_ERR("no mailboxes resources\n");
+ ret = -ENODEV;
+ goto free_idev;
+ }
+
+ mdev_res = kzalloc((sizeof(*mdev_res) * (mdev_num + 1)), GFP_KERNEL);
+ if (!mdev_res) {
+ ret = -ENOMEM;
+ goto free_idev;
+ }
+ mdev_res[mdev_num] = NULL;
+
+ ret = hisi_mdev_get(idev, mdev_res, node);
+ if (ret) {
+ MDEV_ERR("can not get ipc resource\n");
+ ret = -ENODEV;
+ goto free_mdevs;
+ }
+
+ ret = hisi_mbox_device_register(&pdev->dev, mdev_res);
+ if (ret) {
+ MDEV_ERR("mdevs register failed\n");
+ ret = -ENODEV;
+ goto put_res;
+ }
+
+ platform_set_drvdata(pdev, idev);
+
+ MDEV_DEBUG("HiIPCV230 mailboxes are ready\n");
+
+ hisi_rproc_init(); /* called here so that pl011_init can use the rproc send functions */
+
+ return 0;
+
+put_res:
+ hisi_mdev_put(idev);
+free_mdevs:
+ kfree(idev->mdev_res);
+free_idev:
+ kfree(idev);
+out:
+ return ret;
+}
+
+static int hisi_mdev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct hisi_ipc_device *idev = platform_get_drvdata(pdev);
+
+ pr_info("%s: suspend +\n", __func__);
+ if (idev)
+ hisi_mbox_device_deactivate(idev->mdev_res);
+ pr_info("%s: suspend -\n", __func__);
+ return 0;
+}
+
+static int hisi_mdev_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct hisi_ipc_device *idev = platform_get_drvdata(pdev);
+
+ pr_info("%s: resume +\n", __func__);
+ if (idev)
+ hisi_mbox_device_activate(idev->mdev_res);
+ pr_info("%s: resume -\n", __func__);
+ return 0;
+}
+
+static const struct of_device_id hisi_mdev_of_match[] = {
+ {.compatible = "hisilicon,HiIPCV230",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_mdev_of_match);
+
+static const struct dev_pm_ops hisi_mdev_pm_ops = {
+ .suspend_late = hisi_mdev_suspend,
+ .resume_early = hisi_mdev_resume,
+};
+
+static struct platform_driver hisi_mdev_driver = {
+ .probe = hisi_mdev_probe,
+ .remove = hisi_mdev_remove,
+ .driver = {
+ .name = "HiIPCV230-mailbox",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(hisi_mdev_of_match),
+ .pm = &hisi_mdev_pm_ops,
+ },
+};
+
+static int __init hisi_mdev_init(void)
+{
+ pr_debug("%s: init\n", MODULE_NAME);
+
+ platform_driver_register(&hisi_mdev_driver);
+ return 0;
+}
+
+core_initcall(hisi_mdev_init);
+
+static void __exit hisi_mdev_exit(void)
+{
+ platform_driver_unregister(&hisi_mdev_driver);
+ return;
+}
+
+module_exit(hisi_mdev_exit);
+
+MODULE_DESCRIPTION("HiIPCV230 ipc, mailbox device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc.c b/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc.c
new file mode 100644
index 000000000000..07134d125bd3
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc.c
@@ -0,0 +1,410 @@
+/*
+ * hisi rproc communication interface
+ *
+ * Copyright (c) 2013- Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/hisi/hisi_mailbox.h>
+#include <linux/hisi/hisi_rproc.h>
+
+#define MODULE_NAME "hisi_rproc"
+
+#define READY() do { is_ready = 1; } while (0)
+#define NOT_READY() do { is_ready = 0; } while (0)
+#define IS_READY() ({ is_ready; })
+#define RPROC_PR_ERR(fmt, args ...) \
+ ({ \
+ pr_err("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })
+/*#define RPROC_PR_INFO(fmt, args ...) \
+ ({ \
+ pr_info("%s(%d):" fmt "\n", \
+ MODULE_NAME, __LINE__, ##args); \
+ })*/
+
+typedef enum {
+ ASYNC_CALL = 0,
+ SYNC_CALL
+} call_type_t;
+
+struct hisi_rproc_info {
+ rproc_id_t rproc_id;
+ struct atomic_notifier_head notifier;
+ struct notifier_block nb;
+ struct hisi_mbox *mbox;
+};
+
+static int is_ready;
+extern struct hisi_mbox_task *g_TxTaskBuffer;
+static struct hisi_rproc_info rproc_table[] = {
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX0,
+ },
+ {
+ .rproc_id = HISI_RPROC_RDR_MBX1,
+ },
+ {
+ .rproc_id = HISI_RPROC_HIFI_MBX2,
+ },
+ {
+ .rproc_id = HISI_RPROC_DEFAULT_MBX3,
+ },
+ {
+ .rproc_id = HISI_RPROC_IOM3_MBX4,
+ },
+ {
+ .rproc_id = HISI_RPROC_IVP_MBX5,
+ },
+ {
+ .rproc_id = HISI_RPROC_IVP_MBX6,
+ },
+ {
+ .rproc_id = HISI_RPROC_IOM3_MBX10,
+ },
+ {
+ .rproc_id = HISI_RPROC_IOM3_MBX11,
+ },
+ {
+ .rproc_id = HISI_RPROC_IOM3_MBX12,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX13,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX14,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX15,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX16,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX17,
+ },
+ {
+ .rproc_id = HISI_RPROC_HIFI_MBX18,
+ },
+ {
+ .rproc_id = HISI_RPROC_IVP_MBX25,
+ },
+ {
+ .rproc_id = HISI_RPROC_IVP_MBX26,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX27,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX28,
+ },
+ {
+ .rproc_id = HISI_RPROC_HIFI_MBX29,
+ },
+ {
+ .rproc_id = HISI_RPROC_LPM3_MBX30,
+ },
+ {
+ .rproc_id = HISI_RPROC_ISP_MBX0,
+ },
+ {
+ .rproc_id = HISI_RPROC_ISP_MBX1,
+ },
+ {
+ .rproc_id = HISI_RPROC_ISP_MBX2,
+ },
+ {
+ .rproc_id = HISI_RPROC_ISP_MBX3,
+ }
+};
+
+extern void hisi_mbox_empty_task(struct hisi_mbox_device *mdev);
+
+static inline struct hisi_rproc_info *find_rproc(rproc_id_t rproc_id)
+{
+ struct hisi_rproc_info *rproc = NULL;
+ int i;
+
+ for (i = 0; i < sizeof(rproc_table) / sizeof(struct hisi_rproc_info); i++) {
+ if (rproc_id == rproc_table[i].rproc_id && NULL != rproc_table[i].mbox) {
+ rproc = &rproc_table[i];
+ break;
+ }
+ }
+
+ return rproc;
+}
+
+int hisi_rproc_xfer_async(rproc_id_t rproc_id, rproc_msg_t *msg, rproc_msg_len_t len)
+{
+ struct hisi_rproc_info *rproc;
+ struct hisi_mbox_task *tx_task = NULL;
+ struct hisi_mbox *mbox = NULL;
+ mbox_ack_type_t ack_type = AUTO_ACK;
+
+ int ret = 0;
+
+ if (WARN_ON(!IS_READY())) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (MBOX_CHAN_DATA_SIZE < len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rproc = find_rproc(rproc_id);
+ if (!rproc) {
+ RPROC_PR_ERR("invalid rproc xfer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mbox = rproc->mbox;
+
+ tx_task = hisi_mbox_task_alloc(mbox, msg, len, ack_type);
+ if (!tx_task) {
+ RPROC_PR_ERR("no mem\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hisi_mbox_msg_send_async(mbox, tx_task);
+ if (ret) {
+ RPROC_PR_ERR("%s async send failed, errno: %d (-12:tx_fifo full;)\n", mbox->tx->name, ret);
+ hisi_mbox_task_free(&tx_task);
+ }
+
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_rproc_xfer_async);
+
+int hisi_rproc_xfer_sync(rproc_id_t rproc_id, rproc_msg_t *msg, rproc_msg_len_t len, rproc_msg_t *ack_buffer, rproc_msg_len_t ack_buffer_len)
+{
+ struct hisi_rproc_info *rproc;
+ struct hisi_mbox *mbox = NULL;
+ mbox_ack_type_t ack_type = MANUAL_ACK;
+ int ret = 0;
+
+ if (WARN_ON(!IS_READY())) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (MBOX_CHAN_DATA_SIZE < len || MBOX_CHAN_DATA_SIZE < ack_buffer_len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rproc = find_rproc(rproc_id);
+ if (!rproc) {
+ RPROC_PR_ERR("invalid rproc xfer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mbox = rproc->mbox;
+
+ ret = hisi_mbox_msg_send_sync(mbox, msg, len, ack_type, ack_buffer, ack_buffer_len);
+ if (ret) {
+ RPROC_PR_ERR("fail to sync send\n");
+ }
+
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_rproc_xfer_sync);
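+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a client that
+ * sends a two-word message to LPM3 through mailbox-13 and waits for the
+ * manual ACK could look roughly like this:
+ *
+ *   rproc_msg_t msg[2] = { 0xdeadbeef, 0x1 };
+ *   rproc_msg_t ack[2] = { 0 };
+ *   int err = hisi_rproc_xfer_sync(HISI_RPROC_LPM3_MBX13, msg, 2, ack, 2);
+ *
+ *   if (err)
+ *       pr_err("ipc to LPM3 failed: %d\n", err);
+ */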
+
+
+static int hisi_rproc_rx_notifier(struct notifier_block *nb, unsigned long len, void *msg)
+{
+ struct hisi_rproc_info *rproc = container_of(nb, struct hisi_rproc_info, nb);
+
+ atomic_notifier_call_chain(&rproc->notifier, len, msg);
+ return 0;
+}
+
+int hisi_rproc_rx_register(rproc_id_t rproc_id, struct notifier_block *nb)
+{
+ struct hisi_rproc_info *rproc;
+ int ret = 0;
+
+ if (WARN_ON(!IS_READY())) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rproc = find_rproc(rproc_id);
+ if (!rproc) {
+ RPROC_PR_ERR("invalid rproc xfer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ atomic_notifier_chain_register(&rproc->notifier, nb);
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(hisi_rproc_rx_register);
+
+int hisi_rproc_rx_unregister(rproc_id_t rproc_id, struct notifier_block *nb)
+{
+ struct hisi_rproc_info *rproc;
+ int ret = 0;
+
+ if (WARN_ON(!IS_READY())) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rproc = find_rproc(rproc_id);
+ if (!rproc) {
+ RPROC_PR_ERR("invalid rproc xfer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ atomic_notifier_chain_unregister(&rproc->notifier, nb);
+out:
+ return ret;
+}
+
+/*
+ * Function name: hisi_rproc_put.
+ * Description: release the ipc channel's structure. This would normally be
+ *              called from a module_exit function, but this module is never
+ *              expected to be unloaded.
+ * Parameters:
+ *	@ rproc_id_t
+ * Return value:
+ *	@ -ENODEV --> failed, other --> succeeded.
+ */
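+/*
+ * Usage sketch (illustrative only; any of the channel IDs listed in
+ * rproc_table above could be passed):
+ *
+ *	if (hisi_rproc_put(HISI_RPROC_LPM3_MBX0))
+ *		pr_err("failed to release the LPM3 MBX0 ipc channel\n");
+ */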
+int hisi_rproc_put(rproc_id_t rproc_id)
+{
+ struct hisi_rproc_info *rproc;
+ int i;
+
+ for (i = 0; i < sizeof(rproc_table) / sizeof(struct hisi_rproc_info); i++) {
+ rproc = &rproc_table[i];
+ if (rproc->mbox && rproc_id == rproc->rproc_id) {
+ hisi_mbox_put(&rproc->mbox);
+ break;
+ }
+ }
+ if (unlikely(sizeof(rproc_table) / sizeof(struct hisi_rproc_info) == i)) {
+		RPROC_PR_ERR("failed to release the structure of ipc channel %d\n", rproc_id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * Function name: hisi_rproc_flush_tx.
+ * Description: flush the tx work queue of the given channel.
+ * Parameters:
+ *	@ rproc_id_t
+ * Return value:
+ *	@ -ENODEV --> failed, other --> succeeded.
+ */
+int hisi_rproc_flush_tx(rproc_id_t rproc_id)
+{
+ struct hisi_rproc_info *rproc;
+ int i;
+
+ for (i = 0; i < sizeof(rproc_table) / sizeof(struct hisi_rproc_info); i++) {
+ rproc = &rproc_table[i];
+ /* MBOX8/9/23/24 may be null in austin and dallas */
+ if (NULL == rproc->mbox)
+ continue;
+ if (rproc->mbox->tx && rproc_id == rproc->rproc_id) {
+ hisi_mbox_empty_task(rproc->mbox->tx);
+ break;
+ }
+ }
+
+ if (unlikely(sizeof(rproc_table) / sizeof(struct hisi_rproc_info) == i)) {
+ return -ENODEV;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(hisi_rproc_rx_unregister);
+
+int hisi_rproc_init(void)
+{
+ struct hisi_rproc_info *rproc;
+ struct hisi_mbox_task *ptask = NULL;
+ int i;
+
+ for (i = 0; i < sizeof(rproc_table) / sizeof(struct hisi_rproc_info); i++) {
+ rproc = &rproc_table[i];
+ if (NULL == rproc->mbox) {
+ ATOMIC_INIT_NOTIFIER_HEAD(&rproc->notifier);
+
+ rproc->nb.next = NULL;
+ rproc->nb.notifier_call = hisi_rproc_rx_notifier;
+ /* rproc->rproc_id as mdev_index to get the right mailbox-dev */
+ rproc->mbox = hisi_mbox_get(rproc->rproc_id, &rproc->nb);
+ if (!rproc->mbox) {
+ /*RPROC_PR_ERR("\nrproc %d will get later \n",rproc->rproc_id);*/
+ continue;
+ }
+ }
+ }
+	/* the last rproc info has been initialized, so mark the rproc interface ready */
+ if ((sizeof(rproc_table) / sizeof(struct hisi_rproc_info)) == i) {
+ READY();
+ }
+
+ if (NULL == g_TxTaskBuffer) {
+ g_TxTaskBuffer = (struct hisi_mbox_task *)kmalloc(TX_TASK_DDR_NODE_NUM * sizeof(struct hisi_mbox_task), GFP_KERNEL);
+ if (NULL == g_TxTaskBuffer) {
+ RPROC_PR_ERR("\n failed to get g_TxTaskBuffer\n");
+ return -ENOMEM;
+ }
+
+ ptask = g_TxTaskBuffer;
+ for (i = 0; i < TX_TASK_DDR_NODE_NUM; i++) {
+			/* init the tx buffer's nodes, mark each one as available */
+ ptask->tx_buffer[0] = TX_TASK_DDR_NODE_AVA;
+ ptask++;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(hisi_rproc_init);
+static void __exit hisi_rproc_exit(void)
+{
+ struct hisi_rproc_info *rproc;
+ int i;
+
+ NOT_READY();
+
+ for (i = 0; i < sizeof(rproc_table) / sizeof(struct hisi_rproc_info); i++) {
+ rproc = &rproc_table[i];
+ if (rproc->mbox)
+ hisi_mbox_put(&rproc->mbox);
+ }
+
+ return;
+}
+
+module_exit(hisi_rproc_exit);
+
+MODULE_DESCRIPTION("HISI rproc communication interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc_test.c b/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc_test.c
new file mode 100644
index 000000000000..95c45d72aaac
--- /dev/null
+++ b/drivers/hisi/mailbox/hisi_mailbox/hisi_rproc_test.c
@@ -0,0 +1,775 @@
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+#include <linux/time.h>
+#include <linux/hisi/hisi_rproc.h>
+#include <linux/hisi/ipc_msg.h>
+/* total number of pressure-test iterations per run */
+#define PRESSURE_TIMES 500
+#define PERFORMANC_TIMES 100
+/* show only 5 sampled time costs */
+#define PERFORMANC_SHOW_NUM 5
+#define PERFORMANC_SHOW_INDEX (PERFORMANC_TIMES/PERFORMANC_SHOW_NUM)
+/* the reference time cost; a send taking longer than this is flagged as slow */
+#define PERFORMANC_FAIL_NSEC 2000000
+#define SYNC_SEND 1
+#define ASYNC_SEND 2
+
+#define test_rproc_get_slice() (0xFFFFFFFF)
+#define TEST_RPROC_NULL ((void *)0)
+#define test_rproc_slice_diff(s, e) (((e) >= (s))?((e) - (s)) : (0xFFFFFFFF - (s) + (e)))
+
+struct test_rproc_cb {
+ void *done_sema;
+ void *sync_sema;
+	unsigned int start_slice; /* for calculating bandwidth */
+	unsigned int end_slice; /* for calculating bandwidth */
+ unsigned int msg_count;
+ unsigned int sync_task_cnt;
+ unsigned int done_task_cnt;
+ int check_ret;
+};
+
+typedef int (*test_rproc_send_func)(unsigned int sync_id,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num,
+ struct test_rproc_cb *rproc_cb);
+
+struct test_rproc_proc {
+ test_rproc_send_func proc_cb;
+ unsigned int sync_id;
+ unsigned char rp_id;
+ unsigned int msg_len;
+ unsigned int msg_num;
+ unsigned int task_num;
+ struct test_rproc_cb *rproc_cb;
+};
+
+static char data_buf[] = "hello, the message looping back.";
+static struct semaphore send_mutex_sema;
+static struct semaphore task_mutex_sema;
+static struct notifier_block nb;
+int interval_v;
+
+
+int test_rproc_msg_send(unsigned int sync_id,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num,
+ struct test_rproc_cb *rproc_cb)
+{
+ int ret = 0;
+ rproc_msg_t tx_buffer[8] = {0};
+ rproc_msg_t ack_buffer[8] = {0};
+ struct semaphore *complete_sema;
+ unsigned int start_slice = 0;
+ unsigned int end_slice = 0;
+ unsigned int sync_task_cnt = 0;
+
+ if (sync_id > 0x3) {
+ printk(KERN_ERR "wrong sync id!\n");
+ return -1;
+ }
+
+ if (msg_len > 8) {
+ printk(KERN_ERR "illegal message length!\n");
+ return -1;
+ }
+
+ if (rproc_cb) {
+ down(&task_mutex_sema);
+ rproc_cb->sync_task_cnt--;
+ sync_task_cnt = rproc_cb->sync_task_cnt;
+ up(&task_mutex_sema);
+ if ((0 == sync_task_cnt)
+ && (TEST_RPROC_NULL != rproc_cb->sync_sema)) {
+ rproc_cb->start_slice = test_rproc_get_slice();
+ up(rproc_cb->sync_sema);
+ }
+
+ if (TEST_RPROC_NULL != rproc_cb->sync_sema) {
+ down(rproc_cb->sync_sema);
+ up(rproc_cb->sync_sema);
+ }
+ }
+
+ tx_buffer[0] = (OBJ_TEST << 16) | (CMD_TEST << 8)/*0x00120500*/;
+ memcpy((void *)&tx_buffer[1], (void *)&data_buf[0], sizeof(tx_buffer) - sizeof(tx_buffer[0]));
+ switch (sync_id) {
+ case 0:
+ while (msg_num--) {
+ ret = RPROC_SYNC_SEND(rp_id, tx_buffer, msg_len, ack_buffer, msg_len);
+ if (ret || (((OBJ_TEST << 16) | (CMD_TEST << 8)) != ack_buffer[0]) || (0x12345678 != ack_buffer[1])) {
+ printk(KERN_ERR "rproc send error, ret %d, rp %d, sync %d, ack[0x%x][0x%x]!\r\n",
+ ret, rp_id, sync_id, ack_buffer[0], ack_buffer[1]);
+ return -1;
+ }
+ if (rproc_cb)
+ rproc_cb->msg_count++;
+ }
+ break;
+
+ case 1:
+ while (msg_num--) {
+ ret = RPROC_SYNC_SEND(rp_id, tx_buffer, msg_len, NULL, 0);
+ if (ret) {
+ printk(KERN_ERR "rproc send error, ret %d, rp %d, sync %d!\r\n", ret, rp_id, sync_id);
+ return -1;
+ }
+ if (rproc_cb)
+ rproc_cb->msg_count++;
+ }
+ break;
+
+ case 2:
+ while (msg_num--) {
+ complete_sema = (struct semaphore *)kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (NULL == complete_sema) {
+ printk(KERN_ERR "error test_rproc_msg_send kmalloc complete_sema\n");
+ return -1;
+ }
+ sema_init(complete_sema, 0);
+ ret = RPROC_ASYNC_SEND(rp_id, tx_buffer, msg_len);
+ if (ret) {
+ printk(KERN_ERR "rproc send error, ret %d, rp %d, sync %d!\r\n", ret, rp_id, sync_id);
+ kfree(complete_sema);
+ return -1;
+ }
+ start_slice = test_rproc_get_slice();
+ if (0 != down_timeout(complete_sema, msecs_to_jiffies(1000))) {
+ printk(KERN_ERR "msg_send timeout rp_id:%d rproc async send err!\r\n", rp_id);
+ kfree(complete_sema);
+ return -1;
+ }
+ end_slice = test_rproc_get_slice();
+ printk(KERN_INFO "async send sync msg spend %d slice!\r\n",
+ test_rproc_slice_diff(start_slice, end_slice));
+ kfree(complete_sema);
+ if (rproc_cb)
+ rproc_cb->msg_count++;
+ }
+ break;
+
+ case 3:
+ while (msg_num--) {
+ ret = RPROC_ASYNC_SEND(rp_id, tx_buffer, msg_len);
+ if (ret) {
+ printk(KERN_ERR "rproc send error, ret %d, rp %d, sync %d!\r\n", ret, rp_id, sync_id);
+ return ret;
+ }
+ if (rproc_cb)
+ rproc_cb->msg_count++;
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "wrong sync ID!\n");
+ return -1;
+ }
+ printk(KERN_INFO "rproc send ok, rp %d, sync %d!\n", rp_id, sync_id);
+
+ return ret;
+}
+
+void test_rproc_msg_send_entry(void *_proc)
+{
+ struct test_rproc_proc *rproc_proc = _proc;
+ struct test_rproc_cb *rproc_cb = TEST_RPROC_NULL;
+ unsigned int done_task_cnt = 0;
+
+
+ if (!rproc_proc) {
+ printk(KERN_ERR "test_rproc_msg_send_entry rproc_proc is NULL\n ");
+ return ;
+ }
+
+ if (!rproc_proc->proc_cb) {
+ printk(KERN_ERR "test_rproc_msg_send_entry proc_cb is NULL\n ");
+ kfree(rproc_proc);
+ return ;
+ }
+
+ if (!rproc_proc->rproc_cb) {
+ printk(KERN_ERR "test_rproc_msg_send_entry rproc_cb is NULL\n ");
+ kfree(rproc_proc);
+ return ;
+ }
+
+ rproc_cb = rproc_proc->rproc_cb;
+ rproc_cb->check_ret = rproc_proc->proc_cb(
+ rproc_proc->sync_id,
+ rproc_proc->rp_id,
+ rproc_proc->msg_len,
+ rproc_proc->msg_num,
+ rproc_cb);
+ kfree(rproc_proc);
+
+ down(&task_mutex_sema);
+ rproc_cb->done_task_cnt--;
+ done_task_cnt = rproc_cb->done_task_cnt;
+ up(&task_mutex_sema);
+ if ((0 == done_task_cnt)
+ && (TEST_RPROC_NULL != rproc_cb->done_sema)) {
+ up(rproc_cb->done_sema);
+ }
+}
+
+void test_rproc_msg_create_task(test_rproc_send_func entryPtr,
+ unsigned int sync_id,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num,
+ unsigned int task_num,
+ struct test_rproc_cb *rproc_cb)
+{
+ char task_name[32] = {0};
+ struct test_rproc_proc *pEntryParam = kmalloc(sizeof(struct test_rproc_proc), GFP_KERNEL);
+ if (NULL == pEntryParam)
+ return;
+
+ pEntryParam->proc_cb = (test_rproc_send_func)entryPtr;
+ pEntryParam->sync_id = sync_id;
+ pEntryParam->rp_id = rp_id;
+ pEntryParam->msg_len = msg_len;
+ pEntryParam->msg_num = msg_num;
+ pEntryParam->task_num = task_num;
+ pEntryParam->rproc_cb = rproc_cb;
+
+ snprintf(task_name, sizeof(task_name), "trproc_task%d", (int)task_num);
+
+ kthread_run((int (*)(void *))test_rproc_msg_send_entry, (void *)pEntryParam, task_name);
+}
+
+int test_rproc_msg_multi_send(unsigned int sync_id,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num,
+ unsigned int task_num)
+{
+	struct test_rproc_cb *rproc_cb = NULL;
+ unsigned int time_diff = 0;
+ unsigned int task_num_t = task_num;
+ int ret = 0;
+
+ down(&send_mutex_sema);
+
+	rproc_cb = kcalloc(1, sizeof(struct test_rproc_cb), GFP_KERNEL);
+ if (!rproc_cb) {
+ printk(KERN_ERR "error test_rproc_msg_multi_send kcalloc\n");
+ ret = -1;
+ goto up_send_mutex_sema;
+ }
+
+ rproc_cb->done_sema = (struct semaphore *)kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (NULL == rproc_cb->done_sema) {
+ printk(KERN_ERR "error test_rproc_msg_multi_send kmalloc done_sema\n");
+ ret = -1;
+ goto free_rproc_cb;
+ }
+ sema_init(rproc_cb->done_sema, 0);
+ rproc_cb->sync_sema = (struct semaphore *)kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (NULL == rproc_cb->sync_sema) {
+		printk(KERN_ERR "error test_rproc_msg_multi_send kmalloc sync_sema\n");
+ ret = -1;
+ goto free_done_sema;
+ }
+ sema_init(rproc_cb->sync_sema, 0);
+ rproc_cb->check_ret = -1;
+ rproc_cb->sync_task_cnt = task_num;
+ rproc_cb->done_task_cnt = task_num;
+ rproc_cb->msg_count = 0;
+
+ while (task_num) {
+ test_rproc_msg_create_task(test_rproc_msg_send,
+ sync_id,
+ rp_id,
+ msg_len,
+ msg_num,
+ task_num,
+ rproc_cb);
+
+ task_num--;
+ }
+
+ down(rproc_cb->done_sema);
+
+ rproc_cb->end_slice = test_rproc_get_slice();
+
+ time_diff = test_rproc_slice_diff(rproc_cb->start_slice, rproc_cb->end_slice);
+ printk(KERN_INFO "rp: %d, sync: %d, total:%d(4byte), latency: %d (slice)\n", rp_id, sync_id,
+ (int)(msg_len * msg_num * task_num_t), (int)time_diff);
+
+ if (0 != rproc_cb->check_ret) {
+ printk(KERN_ERR "test_rproc_msg_multi_send: CheckRet error\n");
+ ret = -1;
+ } else if (msg_num * task_num_t != rproc_cb->msg_count) {
+ printk(KERN_ERR "test_rproc_msg_multi_send: MsgCount(%d) error\n", (int)rproc_cb->msg_count);
+ ret = -1;
+ } else {
+ printk(KERN_INFO "test_rproc_msg_multi_send: Success!\n");
+ ret = 0;
+ }
+
+ kfree(rproc_cb->sync_sema);
+ rproc_cb->sync_sema = TEST_RPROC_NULL;
+free_done_sema:
+ kfree(rproc_cb->done_sema);
+ rproc_cb->done_sema = TEST_RPROC_NULL;
+free_rproc_cb:
+ kfree(rproc_cb);
+ rproc_cb = TEST_RPROC_NULL;
+up_send_mutex_sema:
+ up(&send_mutex_sema);
+ return ret;
+}
+
+void test_rproc_single_task_sync(unsigned int msg_type, unsigned char rp_id, unsigned int msg_len, unsigned int msg_num)
+{
+ int ret = 0;
+ unsigned int sync_id = 0;
+
+ sync_id = (0 == msg_type) ? 0 : 1;
+ ret = test_rproc_msg_multi_send(sync_id, rp_id, msg_len, msg_num, 1);
+ if (0 != ret)
+ printk(KERN_ERR "test_rproc_single_task_sync: Fail\r\n");
+ else
+ printk(KERN_ERR "test_rproc_single_task_sync: Success\r\n");
+}
+
+void test_rproc_single_task_async(unsigned int msg_type,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num)
+{
+ int ret = 0;
+ unsigned int sync_id = 0;
+
+ sync_id = (0 == msg_type) ? 2 : 3;
+ ret = test_rproc_msg_multi_send(sync_id, rp_id, msg_len, msg_num, 1);
+ if (0 != ret)
+ printk(KERN_ERR "test_rproc_single_task_async: Fail\r\n");
+ else
+ printk(KERN_ERR "test_rproc_single_task_async: Success\r\n");
+}
+
+void test_rproc_multi_task_sync(unsigned int msg_type,
+ unsigned char task_num,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num)
+{
+ int ret = 0;
+ unsigned int sync_id = 0;
+
+ sync_id = (0 == msg_type) ? 0 : 1;
+ ret = test_rproc_msg_multi_send(sync_id, rp_id, msg_len, msg_num, task_num);
+ if (0 != ret)
+ printk(KERN_ERR "test_rproc_multi_task_sync: Fail\r\n");
+ else
+ printk(KERN_ERR "test_rproc_multi_task_sync: Success\r\n");
+}
+
+void test_rproc_multi_task_async(unsigned int msg_type,
+ unsigned char task_num,
+ unsigned char rp_id,
+ unsigned int msg_len,
+ unsigned int msg_num)
+{
+ int ret = 0;
+ unsigned int sync_id = 0;
+
+ sync_id = (0 == msg_type) ? 2 : 3;
+ ret = test_rproc_msg_multi_send(sync_id, rp_id, msg_len, msg_num, task_num);
+ if (0 != ret)
+ printk(KERN_ERR "test_rproc_multi_task_async: Fail\r\n");
+ else
+ printk(KERN_ERR "test_rproc_multi_task_async: Success\r\n");
+}
+
+static int mbox_recv_from_lpmcu(struct notifier_block *nb, unsigned long len, void *msg)
+{
+ int i;
+
+ pr_err("$$$$$$$$$$$$$$$$$$$$$----%s--->\n", __func__);
+ for (i = 0; i < len; ++i) {
+ pr_err("-------->msg[%d] = %#.8x\n", i, ((int *)msg)[i]);
+ }
+ return 0;
+}
+
+static void test_all_sync_ipc_init(void)
+{
+ int ret, rproc_id = 0;
+ static int times;
+ /*init just once*/
+ if (times) {
+ return;
+ }
+ rproc_id = HISI_RPROC_LPM3_MBX0;
+
+ nb.next = NULL;
+ nb.notifier_call = mbox_recv_from_lpmcu;
+
+ /* register the rx notify callback */
+ ret = RPROC_MONITOR_REGISTER(rproc_id, &nb);
+ if (ret) {
+		pr_err("%s: RPROC_MONITOR_REGISTER failed\n", __func__);
+ }
+ times++;
+}
+
+static int test_all_send_to_lpmcu(void *arg)
+{
+ rproc_msg_t ack_buffer[2] = {0};
+ int err = 0;
+ int index = 0;
+ union ipc_data *lpm3Msg = NULL;
+ int syntypye = 0;
+ int idata = 0;
+
+ lpm3Msg = (union ipc_data *)kmalloc(sizeof(union ipc_data), GFP_KERNEL);
+ if (NULL == lpm3Msg)
+ return -1;
+
+ lpm3Msg->cmd_mix.cmd_src = OBJ_AP;
+ lpm3Msg->cmd_mix.cmd_obj = OBJ_LPM3;
+ lpm3Msg->cmd_mix.cmd = CMD_SETTING;
+ lpm3Msg->cmd_mix.cmd_type = TYPE_TEST;
+	/*
+	 * data[0]: 0x0008030D
+	 * data[1]: 0xAABBCCDD
+	 *   AA: 00 (secure or not)
+	 *   BB: mailbox id, lpmcu-->ap is 0
+	 *   CC: dest core (1:A53, 2:Maia, 4:Sensorhub, 8:LPMCU, 16:HIFI, 32:MODEM, 64:BBE16, 128:IVP)
+	 *   DD: mode, 1 for autoack
+	 */
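+	/*
+	 * Worked example (the value used below; field meanings follow the
+	 * comment above): data[1] = 0x01000101 decodes as AA = 0x01 (secure
+	 * flag field), BB = 0x00 (mailbox 0, lpmcu-->ap), CC = 0x01 (dest
+	 * core A53) and DD = 0x01 (autoack mode).
+	 */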
+ lpm3Msg->data[0] = 0x0008030F;
+ lpm3Msg->data[1] = 0x01000101;
+
+ while (1) {
+ syntypye++;
+ idata++;
+ if (0 == syntypye%10000)
+			pr_err("xxxxxxxxx pressure test has completed %d cycles\n", syntypye);
+
+ lpm3Msg->data[2] = idata;
+ lpm3Msg->data[3] = idata+0x10;
+ lpm3Msg->data[4] = idata+0x100;
+ lpm3Msg->data[5] = idata+0x1000;
+ lpm3Msg->data[6] = idata+0x10000;
+ lpm3Msg->data[7] = idata+0x100000;
+ for (index = HISI_RPROC_LPM3_MBX13; index < HISI_RPROC_ISP_MBX0; index++) {
+ if (HISI_RPROC_LPM3_MBX17 < index && HISI_RPROC_LPM3_MBX27 > index)
+				continue; /* only exercise the lpmcu channels */
+
+ if (syntypye%2)
+ err = RPROC_ASYNC_SEND(index, (rproc_msg_t *)lpm3Msg, 8);
+ else
+ err = RPROC_SYNC_SEND(index, (rproc_msg_t *) lpm3Msg, 8, ack_buffer, 2);
+ if (err) {
+ kfree(lpm3Msg);
+ lpm3Msg = NULL;
+ pr_err("xxxxxxxxx pressure test failed and break with err %d!\n", err);
+ return -1;
+ }
+ msleep(*(int *)arg);
+ }
+ }
+ if (lpm3Msg) {
+ kfree(lpm3Msg);
+ lpm3Msg = NULL;
+ }
+ return 0;
+}
+
+static int rproc_performance_cnt(struct timespec *ptimespec, unsigned char rproc_id, union ipc_data *msg,
+ unsigned char len, unsigned char synctpye)
+{
+ unsigned int index = 0;
+ rproc_msg_t ack_buffer[2] = {0};
+ struct timespec time_begin, time_end;
+ int sleepcnt = 0;
+ int ret = 0;
+
+ for (index = 0; index < PRESSURE_TIMES; index++) {
+		/* measure the cost of the first PERFORMANC_TIMES sends only */
+ if (index < PERFORMANC_TIMES)
+ getnstimeofday(&time_begin);
+
+ if (SYNC_SEND == synctpye)
+ ret |= RPROC_SYNC_SEND(rproc_id, (rproc_msg_t *) msg, len, ack_buffer, 2);
+ else if (ASYNC_SEND == synctpye)
+ ret |= RPROC_ASYNC_SEND(rproc_id, (rproc_msg_t *) msg, len);
+
+ if (index < PERFORMANC_TIMES) {
+ getnstimeofday(&time_end);
+ ptimespec[index].tv_sec = time_end.tv_sec - time_begin.tv_sec;
+ if (time_end.tv_nsec < time_begin.tv_nsec && ptimespec[index].tv_sec) {
+				ptimespec[index].tv_sec -= 1;
+ ptimespec[index].tv_nsec = time_end.tv_nsec + 1000000000 - time_begin.tv_nsec;
+ } else
+ ptimespec[index].tv_nsec = time_end.tv_nsec - time_begin.tv_nsec;
+			/* if a send costs more than PERFORMANC_FAIL_NSEC, record it; more than 10 such cases fails the test */
+ if (ptimespec[index].tv_sec || PERFORMANC_FAIL_NSEC < ptimespec[index].tv_nsec) {
+ sleepcnt++;
+ pr_err("rproc_performance: rproc_id %d index-%d cost %lu sec, %lu nsec\n", rproc_id, index, (unsigned long)ptimespec[index].tv_sec, ptimespec[index].tv_nsec);
+				/* Remote processors such as HIFI and SENSORHUB may be asleep; the first IPC wakes them up, which can take a long time */
+ if (10 < sleepcnt)
+ ret |= -1;
+ }
+ }
+
+ if (ret)
+ break;
+
+ if (ASYNC_SEND == synctpye)
+ msleep(1);
+ }
+
+ return ret;
+
+}
+
+void show_rproc_performance(struct timespec *ptimespec, unsigned char rproc_id, unsigned char synctpye)
+{
+ unsigned int index = 0;
+ for (index = 0; index < PERFORMANC_TIMES; index++) {
+		/* show only a sample of the measurements */
+ if (0 == index%PERFORMANC_SHOW_INDEX) {
+ if (SYNC_SEND == synctpye)
+ pr_err("rproc_performance: rproc_id %d sync send cost %lu sec, %lu nsec\n", rproc_id, (unsigned long)ptimespec[index].tv_sec, ptimespec[index].tv_nsec);
+ else if (ASYNC_SEND == synctpye)
+ pr_err("rproc_performance: rproc_id %d async send cost %lu sec, %lu nsec\n", rproc_id, (unsigned long)ptimespec[index].tv_sec, ptimespec[index].tv_nsec);
+ }
+ }
+}
+/*
+ * Function name: test_rproc_performance.
+ * Description: test each core's IPC performance against the AP.
+ * Parameters:
+ *	@ objmax: index of the remote processor to send ipc msgs to. Some targets,
+ *	  such as HIFI and ISP, may be in sleep mode and are not suitable for pressure testing.
+ *	@ type: 1 for sync send, 0 for async send, any other value tests both.
+ * Return value:
+ *	@ 0: success, others failed.
+ */
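+/*
+ * Example call (illustrative): test_rproc_performance(1, 1) measures the
+ * sync-send latency of rproc_table[0] (HISI_RPROC_LPM3_MBX13), and
+ * test_rproc_performance(1, 0) measures the async path for the same channel.
+ */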
+int test_rproc_performance(unsigned char objmax, int type)
+{
+ unsigned char rproc_id = 0;
+ union ipc_data *msg = NULL;
+ int ret = 0, max_index;
+ struct timespec *ptimespec = NULL;
+
+ /*
+ * unsigned char rproc_table[] = {HISI_RPROC_LPM3_MBX13, HISI_RPROC_HIFI_MBX18, HISI_RPROC_IOM3_MBX10 , HISI_RPROC_ISP_MBX2};
+ * at dallas and austin
+ */
+ unsigned char rproc_table[] = {HISI_RPROC_LPM3_MBX13, HISI_RPROC_HIFI_MBX18, HISI_RPROC_IOM3_MBX10, HISI_RPROC_ISP_MBX24};
+ msg = (union ipc_data *)kmalloc(sizeof(union ipc_data), GFP_KERNEL);
+ ptimespec = (struct timespec *)kmalloc(sizeof(struct timespec) * PERFORMANC_TIMES, GFP_KERNEL);
+ if (NULL == msg || NULL == ptimespec) {
+ ret = -1;
+ goto out;
+ }
+	if (objmax < 1 || objmax > 4) {
+		pr_err("test_rproc_performance array bound\n");
+		ret = -1;
+		goto out;
+	}
+	/* the message content does not matter here */
+ memset((void *)msg, 0xFF, sizeof(union ipc_data));
+
+	max_index = objmax < sizeof(rproc_table)/sizeof(unsigned char) ? objmax : sizeof(rproc_table)/sizeof(unsigned char);
+
+	rproc_id = rproc_table[max_index - 1];
+ if (type == 1) {
+ ret |= rproc_performance_cnt(ptimespec, rproc_id, msg, MAX_MAIL_SIZE, SYNC_SEND);
+ show_rproc_performance(ptimespec, rproc_id, SYNC_SEND);
+ } else if (type == 0) {
+ ret |= rproc_performance_cnt(ptimespec, rproc_id, msg, MAX_MAIL_SIZE, ASYNC_SEND);
+ show_rproc_performance(ptimespec, rproc_id, ASYNC_SEND);
+ } else {
+ ret |= rproc_performance_cnt(ptimespec, rproc_id, msg, MAX_MAIL_SIZE, SYNC_SEND);
+ show_rproc_performance(ptimespec, rproc_id, SYNC_SEND);
+ ret |= rproc_performance_cnt(ptimespec, rproc_id, msg, MAX_MAIL_SIZE, ASYNC_SEND);
+ show_rproc_performance(ptimespec, rproc_id, ASYNC_SEND);
+ }
+
+out:
+ if (ret)
+ pr_err("rproc_performance and pressure failed\n");
+ else
+ pr_err("rproc_performance and pressure pass\n");
+
+ if (msg)
+ kfree(msg);
+ if (ptimespec)
+ kfree(ptimespec);
+
+ return ret;
+}
+
+
+static int test_async_send_to_lpmcu(void *arg)
+{
+ int err = 0, rproc_lpm3 = 0;
+ union ipc_data *lpm3Msg = NULL;
+
+ lpm3Msg = (union ipc_data *)kmalloc(sizeof(union ipc_data), GFP_KERNEL);
+ if (NULL == lpm3Msg)
+ return -1;
+
+ lpm3Msg->cmd_mix.cmd_src = OBJ_AP;
+ lpm3Msg->cmd_mix.cmd_obj = OBJ_LPM3;
+ lpm3Msg->cmd_mix.cmd = CMD_SETTING;
+ lpm3Msg->cmd_mix.cmd_type = TYPE_TEST;
+	/*
+	 * data[0]: 0x0008030D
+	 * data[1]: 0xAABBCCDD
+	 *   AA: 00 (secure or not)
+	 *   BB: mailbox id, lpmcu-->ap is 0
+	 *   CC: dest core (1:A53, 2:Maia, 4:Sensorhub, 8:LPMCU, 16:HIFI, 32:MODEM, 64:BBE16, 128:IVP)
+	 *   DD: mode, 1 for autoack
+	 */
+ lpm3Msg->data[0] = 0x0008030F;
+ lpm3Msg->data[1] = 0x01000101;
+ lpm3Msg->data[2] = 0x22222222;
+ lpm3Msg->data[3] = 0x33333333;
+ lpm3Msg->data[4] = 0x44444444;
+ lpm3Msg->data[5] = 0x55555555;
+ lpm3Msg->data[6] = 0x66666666;
+ lpm3Msg->data[7] = 0x77777777;
+
+ rproc_lpm3 = HISI_RPROC_LPM3_MBX13;
+ while (1) {
+ err = RPROC_ASYNC_SEND(rproc_lpm3, (rproc_msg_t *)lpm3Msg, 8);
+
+ if (err) {
+ pr_err("xxxxxxxxx pressure test failed and break with err %d!\n", err);
+ kfree(lpm3Msg);
+ lpm3Msg = NULL;
+ return -1;
+ }
+ msleep(*(int *)arg);
+ }
+ return 0;
+}
+
+
+/*
+ * Function name: test_all_kind_ipc.
+ * Description: test all the ipc channels.
+ * Parameters:
+ *	@ type: 1: sync send to every channel; 2: async send to every channel;
+ *	  3: pressure test with lpmcu through all channels; 4: async pressure test with lpmcu;
+ *	  5: single mailbox sync send; 6: single mailbox async send
+ *	@ obj: the remote processor to send ipc msgs to (used by types 5 and 6). Targets such as
+ *	  HIFI and ISP may be in sleep mode and are not suitable for pressure testing.
+ *	@ interval: delay between sends, in ms
+ *	@ threads: number of threads (types 3 and 4) or iterations (types 5 and 6) to run
+ * Return value:
+ *	@ 0: success, others failed.
+ */
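+/*
+ * Example call (illustrative): test_all_kind_ipc(1, 0, 10, 1) walks every
+ * channel up to HISI_RPROC_LPM3_MBX30 with a synchronous send and a 10 ms
+ * delay between channels; the obj and threads arguments are ignored for
+ * type 1.
+ */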
+int test_all_kind_ipc(int type, int obj, int interval, int threads)
+{
+ rproc_msg_t ack_buffer[2] = {0};
+ int err = 0;
+ int index = 0, max_id = 0;
+ int ret = 0;
+ union ipc_data *lpm3Msg = NULL;
+
+ interval_v = interval;
+ max_id = HISI_RPROC_LPM3_MBX30;
+ lpm3Msg = (union ipc_data *)kmalloc(sizeof(union ipc_data), GFP_KERNEL);
+ if (lpm3Msg == NULL)
+ return -1;
+ lpm3Msg->data[0] = 0x00000000;
+ lpm3Msg->data[1] = 0x11111111;
+ lpm3Msg->data[2] = 0x22222222;
+ lpm3Msg->data[3] = 0x33333333;
+ lpm3Msg->data[4] = 0x44444444;
+ lpm3Msg->data[5] = 0x55555555;
+ lpm3Msg->data[6] = 0x66666666;
+ lpm3Msg->data[7] = 0x77777777;
+ /*register the rx callback*/
+ test_all_sync_ipc_init();
+	/* traverse all the ipc channels in sync mode */
+ if (1 == type) {
+ for (index = 0; index <= max_id; index++) {
+ err = RPROC_SYNC_SEND(index, (rproc_msg_t *) lpm3Msg, 8, ack_buffer, 2);
+ if (err)
+ pr_err("xxxxxxxxx mailbox channel %d: send err!\n", index);
+ else
+ pr_err("xxxxxxxxx mailbox channel %d: send ok, ack is 0x%x, 0x%x!\n", index, ack_buffer[0], ack_buffer[1]);
+
+ msleep(interval);
+ }
+	} else if (2 == type)/* traverse all the ipc channels in async mode */ {
+
+ for (index = 0; index <= max_id; index++) {
+ err = RPROC_ASYNC_SEND(index, (rproc_msg_t *)lpm3Msg, 8);
+ if (err)
+ pr_err("xxxxxxxxx mailbox channel %d: send err!\n", index);
+ else
+				pr_err("xxxxxxxxx mailbox channel %d: async send ok!\n", index);
+
+ msleep(interval);
+ }
+ } else if (3 == type) /*send to lpmcu and will receive msg from lpmcu, use for pressure test*/ {
+ for (index = 0; index < threads; index++) {
+ kthread_run(test_all_send_to_lpmcu, (void *)&interval_v, "all_to_lpmcu");
+ }
+ } else if (4 == type) /*send to lpmcu and will receive msg from lpmcu, use for pressure test*/ {
+ for (index = 0; index < threads; index++) {
+ kthread_run(test_async_send_to_lpmcu, (void *)&interval_v, "async_to_lpmcu");
+ }
+ } else if (5 == type) /* single mailbox sync send */ {
+ for (index = 0; index < threads; index++) {
+ pr_err("mailbox-%d sync send start!\n", obj);
+ ret = RPROC_SYNC_SEND((rproc_id_t)obj, (rproc_msg_t *) lpm3Msg, 8, ack_buffer, 2);
+ if (ret)
+ err++;
+			pr_err("mailbox-%d sync send end!\n", obj);
+ msleep(interval);/*lint !e732 */
+ }
+ if (0 < err)
+			pr_err("xxxxxxxxx mailbox channel %d: sync send err count: %d!\n", obj, err);
+	} else if (6 == type)/* single mailbox async send */ {
+ for (index = 0; index < threads; index++) {
+ pr_err("mailbox-%d async send start!\n", obj);
+ ret = RPROC_ASYNC_SEND((rproc_id_t)obj, (rproc_msg_t *)lpm3Msg, 8);
+ if (ret)
+ err++;
+ pr_err("mailbox-%d async send end!\n", obj);
+ msleep(interval);/*lint !e732 */
+ }
+ if (0 < err)
+			pr_err("xxxxxxxxx mailbox channel %d: async send err count: %d!\n", obj, err);
+ }
+ kfree(lpm3Msg);
+ lpm3Msg = NULL;
+
+ return err;
+}
+
+static int __init test_rproc_init(void)
+{
+ sema_init(&send_mutex_sema, 1);
+ sema_init(&task_mutex_sema, 1);
+ return 0;
+}
+
+static void __exit test_rproc_exit(void)
+{
+}
+
+module_init(test_rproc_init);
+module_exit(test_rproc_exit);
+MODULE_DESCRIPTION("hisi_rproc_test");
+MODULE_LICENSE("GPL v2");
+
+
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 22bfbe147870..92d34e800c05 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -80,6 +80,7 @@ struct dw_i2c_dev {
void __iomem *base;
struct completion cmd_complete;
struct clk *clk;
+ struct reset_control *rst;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
int cmd_err;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index b42d95f09c68..e630d4f0dde2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/io.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/platform_data/i2c-designware.h>
@@ -179,6 +180,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
/* fast mode by default because of legacy reasons */
dev->clk_freq = 400000;
+ dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dev->rst)) {
+ if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ reset_control_deassert(dev->rst);
+ }
+
if (pdata) {
dev->clk_freq = pdata->i2c_scl_freq;
} else {
@@ -207,12 +216,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
- return -EINVAL;
+ r = -EINVAL;
+ goto exit_reset;
}
r = i2c_dw_eval_lock_support(dev);
if (r)
- return r;
+ goto exit_reset;
dev->functionality =
I2C_FUNC_I2C |
@@ -270,10 +280,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
r = i2c_dw_probe(dev);
- if (r && !dev->pm_runtime_disabled)
- pm_runtime_disable(&pdev->dev);
+ if (r)
+ goto exit_probe;
return r;
+
+exit_probe:
+ if (!dev->pm_runtime_disabled)
+ pm_runtime_disable(&pdev->dev);
+exit_reset:
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
+ return r;
}
static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -290,6 +308,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
if (!dev->pm_runtime_disabled)
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
return 0;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ee54d71c7eb..b984fffccb6e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -362,4 +362,22 @@ config MTK_IOMMU_V1
if unsure, say N here.
+config HISI_IODOMAIN_API
+ bool
+
+config HISI_IOMMU
+ bool "Hisilicon IOMMU Support"
+ select IOMMU_API
+ select HISI_IODOMAIN_API
+ help
+ Hisilicon IOMMU Support.
+
+config HISI_IOMMU_LPAE
+ bool "Hisilicon IOMMU LPAE Support"
+ select IOMMU_API
+	select HISI_IODOMAIN_API
+ select HISI_IOMMU
+ help
+ Hisilicon IOMMU Support.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 195f7b997d8e..b0d5377af413 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,3 +27,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
+obj-$(CONFIG_HISI_IODOMAIN_API) += ion-iommu-map.o
+obj-$(CONFIG_HISI_IOMMU_LPAE) += hisi_smmu_lpae.o
diff --git a/drivers/iommu/hisi_smmu.h b/drivers/iommu/hisi_smmu.h
new file mode 100644
index 000000000000..4637244dba6b
--- /dev/null
+++ b/drivers/iommu/hisi_smmu.h
@@ -0,0 +1,178 @@
+#ifndef HISI_SMMU_H
+#define HISI_SMMU_H
+
+/*#define IOMMU_DEBUG*/
+#ifdef IOMMU_DEBUG
+#define dbg(format, arg...) printk(KERN_ERR "[iommu]" format, ##arg)
+#else
+#define dbg(format, arg...)
+#endif
+
+#define SMMU_PHY_PTRS_PER_PTE (256)
+/*#define SMMU_PHY_PTRS_PER_PGD (4096)*/
+#define SMMU_PTRS_PER_PGD (4)
+#define SMMU_PTRS_PER_PMD (512)
+#define SMMU_PTRS_PER_PTE (512)
+#define SMMU_PAGE_SHIFT (12)
+
+#define PAGE_TABLE_ADDR_MASK (UL(0xFFFFFFF) << SMMU_PAGE_SHIFT)
+
+#define SMMU_PAGE_SIZE BIT(SMMU_PAGE_SHIFT)
+#define SMMU_PAGE_MASK (~(SMMU_PAGE_SIZE-1))
+
+#define SMMU_PGDIR_SHIFT (30)
+#define SMMU_PGDIR_SIZE BIT(SMMU_PGDIR_SHIFT)
+#define SMMU_PGDIR_MASK (~(SMMU_PGDIR_SIZE-1))
+
+#define SMMU_PMDIR_SHIFT (21)
+#define SMMU_PMDIR_SIZE BIT(SMMU_PMDIR_SHIFT)
+#define SMMU_PMDIR_MASK (~(SMMU_PMDIR_SIZE-1))
+#define SMMU_PGD_TYPE (BIT(0) | BIT(1))
+#define SMMU_PMD_TYPE (BIT(0) | BIT(1))
+#define SMMU_PTE_TYPE (BIT(0) | BIT(1))
+
+#define SMMU_PGD_NS BIT(63)
+#define SMMU_PMD_NS BIT(63)
+#define SMMU_PTE_NS BIT(5)
+
+#define SMMU_PTE_PXN BIT(53) /* Privileged XN */
+#define SMMU_PTE_UXN BIT(54) /* User XN */
+#define SMMU_PTE_USER BIT(6) /* AP[1] */
+#define SMMU_PTE_RDONLY BIT(7) /* AP[2] */
+#define SMMU_PTE_SHARED (BIT(8) | BIT(9)) /* SH[1:0], inner shareable */
+#define SMMU_PTE_AF BIT(10) /* Access Flag */
+#define SMMU_PTE_NG BIT(11) /* nG */
+#define SMMU_PTE_ATTRINDX(t) ((t) << 2)
+/*
+ * Memory types available.
+ * USED BY A7
+ */
+#define HISI_MT_NORMAL 0
+#define HISI_MT_NORMAL_CACHE 4
+#define HISI_MT_NORMAL_NC 5
+#define HISI_MT_DEVICE_nGnRE 6
+
+
+#define SMMU_PAGE_DEFAULT (SMMU_PTE_TYPE | SMMU_PTE_AF | SMMU_PTE_SHARED)
+
+#define SMMU_PROT_DEVICE_nGnRE (SMMU_PAGE_DEFAULT | SMMU_PTE_PXN | \
+ SMMU_PTE_UXN | SMMU_PTE_ATTRINDX(HISI_MT_DEVICE_nGnRE))
+#define SMMU_PROT_NORMAL_CACHE (SMMU_PAGE_DEFAULT | SMMU_PTE_PXN | \
+ SMMU_PTE_UXN | SMMU_PTE_ATTRINDX(HISI_MT_NORMAL_CACHE))
+#define SMMU_PROT_NORMAL_NC (SMMU_PAGE_DEFAULT | SMMU_PTE_PXN | \
+ SMMU_PTE_UXN | SMMU_PTE_ATTRINDX(HISI_MT_NORMAL_NC))
+#define SMMU_PROT_NORMAL (SMMU_PAGE_DEFAULT | SMMU_PTE_PXN | \
+ SMMU_PTE_UXN | SMMU_PTE_ATTRINDX(HISI_MT_NORMAL))
+
+#define SMMU_PAGE_READWRITE (SMMU_PAGE_DEFAULT | SMMU_PTE_USER | \
+ SMMU_PTE_NG | SMMU_PTE_PXN | SMMU_PTE_UXN)
+#define SMMU_PAGE_READONLY (SMMU_PAGE_DEFAULT | SMMU_PTE_USER | \
+ SMMU_PTE_RDONLY | SMMU_PTE_NG | SMMU_PTE_PXN | SMMU_PTE_UXN)
+#define SMMU_PAGE_READONLY_EXEC (SMMU_PAGE_DEFAULT | SMMU_PTE_USER | \
+ SMMU_PTE_NG)
+
+#define smmu_pte_index(addr) (((addr) >> SMMU_PAGE_SHIFT) & (SMMU_PTRS_PER_PTE - 1))
+#define smmu_pmd_index(addr) (((addr) >> SMMU_PMDIR_SHIFT) & (SMMU_PTRS_PER_PMD - 1))
+#define smmu_pgd_index(addr) (((addr) >> SMMU_PGDIR_SHIFT) & (SMMU_PTRS_PER_PGD - 1))
+#define SMMU_PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
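+/*
+ * Address-split sketch (derived from the shift/size macros above, assuming
+ * a 32-bit IOVA and 4 KB pages): bits [31:30] index the 4-entry pgd, bits
+ * [29:21] the 512-entry pmd, bits [20:12] the 512-entry pte, and bits
+ * [11:0] are the offset inside the page. For example, IOVA 0x40203456
+ * resolves to pgd index 1, pmd index 1, pte index 3 and page offset 0x456.
+ */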
+
+typedef u64 smmu_pgd_t;
+typedef u64 smmu_pmd_t;
+typedef u64 smmu_pte_t;
+
+/*smmu device object*/
+struct hisi_smmu_device_lpae {
+	struct device *dev;
+ struct list_head domain_list;
+ unsigned int ref_count;
+ spinlock_t lock;
+ unsigned long va_pgtable_addr;
+ phys_addr_t smmu_phy_pgtable_addr;
+ smmu_pgd_t *smmu_pgd;
+};
+
+struct hisi_map_tile_position_lpae {
+	struct scatterlist *sg;
+ unsigned long offset;
+};
+
+extern struct hisi_smmu_device_lpae *hisi_smmu_dev;
+
+static inline unsigned int smmu_pgd_none_lpae(smmu_pgd_t pgd) {
+	return !pgd;
+}
+
+static inline unsigned int smmu_pmd_none_lpae(smmu_pmd_t pmd) {
+	return !pmd;
+}
+
+static inline unsigned int smmu_pte_none_lpae(smmu_pte_t pte) {
+	return !pte;
+}
+
+static inline unsigned int pte_is_valid_lpae(smmu_pte_t *ptep) {
+ return (unsigned int)((*(ptep)&SMMU_PTE_TYPE) ? 1 : 0);
+}
+
+/* Find an entry in the second-level page table.. */
+static inline void *smmu_pmd_page_vaddr_lpae(smmu_pmd_t *pgd)
+{
+ return phys_to_virt(*pgd & PAGE_TABLE_ADDR_MASK);
+}
+
+/* Find an entry in the third-level page table.. */
+static inline void *smmu_pte_page_vaddr_lpae(smmu_pmd_t *pmd)
+{
+ return phys_to_virt(*pmd & PAGE_TABLE_ADDR_MASK);
+}
+
+
+/*fill the pgd entry, pgd value must be 64bit */
+static inline void smmu_set_pgd_lpae(smmu_pgd_t *pgdp, u64 pgd)
+{
+ *pgdp = pgd;
+ dsb(ishst);
+ isb();
+}
+
+/* fill the pmd entry, the pmd value must be 64bit */
+static inline void smmu_set_pmd_lpae(smmu_pgd_t *pmdp, u64 pmd)
+{
+	dbg("smmu_set_pmd_lpae: pmd = 0x%llx\n", pmd);
+ *pmdp = pmd;
+ dsb(ishst);
+ isb();
+}
+
+static inline void smmu_pmd_populate_lpae(smmu_pmd_t *pmdp, pgtable_t ptep, pgdval_t prot)
+{
+ smmu_set_pmd_lpae(pmdp, (u64)(page_to_phys(ptep) | prot));
+}
+
+static inline void smmu_pgd_populate_lpae(smmu_pgd_t *pgdp, pgtable_t pmdp, pgdval_t prot)
+{
+ smmu_set_pgd_lpae(pgdp, (u64)(page_to_phys(pmdp) | prot));
+}
+
+static inline unsigned long smmu_pgd_addr_end_lpae(unsigned long addr, unsigned long end)
+{
+ unsigned long boundary = (addr + SMMU_PGDIR_SIZE) & SMMU_PGDIR_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+static inline unsigned long smmu_pmd_addr_end_lpae(unsigned long addr, unsigned long end)
+{
+ unsigned long boundary = (addr + SMMU_PMDIR_SIZE) & SMMU_PMDIR_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+int hisi_smmu_handle_mapping_lpae(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot);
+
+unsigned int hisi_smmu_handle_unmapping_lpae(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+
+#endif
diff --git a/drivers/iommu/hisi_smmu_lpae.c b/drivers/iommu/hisi_smmu_lpae.c
new file mode 100644
index 000000000000..0ccd5c9ffeb1
--- /dev/null
+++ b/drivers/iommu/hisi_smmu_lpae.c
@@ -0,0 +1,849 @@
+
+/*
+ * hisi_smmu_lpae.c -- 3 layer pagetable
+ *
+ * Copyright (c) 2014 Huawei Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <asm/pgalloc.h>
+#include <linux/debugfs.h>
+#include <linux/hisi/hisi-iommu.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include "hisi_smmu.h"
+
+struct hisi_smmu_device_lpae *hisi_smmu_dev;
+
+/* convert a 64bit pte table address to its struct page */
+static pgtable_t smmu_pgd_to_pte_lpae(unsigned long ppte_table)
+{
+ unsigned long page_table_addr;
+
+ if (!ppte_table) {
+ dbg("error: the pointer of pte_table is NULL\n");
+ return NULL;
+ }
+ page_table_addr = (unsigned long)ppte_table;
+ return phys_to_page(page_table_addr);
+}
+
+/* convert a 64bit pte table address to its struct page */
+static pgtable_t smmu_pmd_to_pte_lpae(unsigned long ppte_table)
+{
+ struct page *table = NULL;
+
+ if (!ppte_table) {
+ dbg("error: the pointer of pte_table is NULL\n");
+ return NULL;
+ }
+ table = phys_to_page(ppte_table);
+ return table;
+}
+
+static int get_domain_data_lpae(struct device_node *np,
+ struct iommu_domain_data *data)
+{
+ unsigned long long align;
+ struct device_node *node = NULL;
+ int ret = 0;
+
+ data->phy_pgd_base = hisi_smmu_dev->smmu_phy_pgtable_addr;
+ if (np) {
+ node = of_find_node_by_name(np, "iommu_info");
+ if (!node) {
+ dbg("find iommu_info node error\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(node, "start-addr",
+ &data->iova_start);
+ if (ret) {
+ dbg("read iova start address error\n");
+ goto read_error;
+ }
+ ret = of_property_read_u32(node, "size", &data->iova_size);
+ if (ret) {
+ dbg("read iova size error\n");
+ goto read_error;
+ }
+ ret = of_property_read_u64(node, "iova-align", &align);
+ if (!ret)
+ data->iova_align = (unsigned long)align;
+ else
+ data->iova_align = SZ_256K;
+
+ pr_err("%s:start_addr 0x%x, size 0x%x align 0x%lx\n",
+ __func__, data->iova_start,
+ data->iova_size, data->iova_align);
+ }
+
+ return 0;
+
+read_error:
+ return ret;
+}
+
+static struct iommu_domain
+*hisi_smmu_domain_alloc_lpae(unsigned iommu_domain_type)
+{
+ struct iommu_domain *domain;
+
+ if (iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain) {
+ pr_err("%s: fail to kzalloc %lu bytes\n",
+ __func__, sizeof(*domain));
+ }
+
+ return domain;
+}
+
+
+static void hisi_smmu_flush_pgtable_lpae(void *addr, size_t size)
+{
+ __flush_dcache_area(addr, size);
+}
+
+static void hisi_smmu_free_ptes_lpae(smmu_pgd_t pmd)
+{
+ pgtable_t table = smmu_pgd_to_pte_lpae(pmd);
+
+ if (!table) {
+ dbg("pte table is null\n");
+ return;
+ }
+ __free_page(table);
+ smmu_set_pmd_lpae(&pmd, 0);
+}
+
+
+static void hisi_smmu_free_pmds_lpae(smmu_pgd_t pgd)
+{
+ pgtable_t table = smmu_pmd_to_pte_lpae(pgd);
+
+ if (!table) {
+ dbg("pte table is null\n");
+ return;
+ }
+ __free_page(table);
+ smmu_set_pgd_lpae(&pgd, 0);
+}
+
+static void hisi_smmu_free_pgtables_lpae(unsigned long *page_table_addr)
+{
+ int i, j;
+ smmu_pgd_t *pgd;
+ smmu_pmd_t *pmd;
+ unsigned long flags;
+
+ pgd = (smmu_pgd_t *)page_table_addr;
+ pmd = (smmu_pmd_t *)page_table_addr;
+
+ spin_lock_irqsave(&hisi_smmu_dev->lock, flags);
+ for (i = 0; i < SMMU_PTRS_PER_PGD; ++i) {
+		if (smmu_pgd_none_lpae(*pgd) && smmu_pmd_none_lpae(*pmd))
+ continue;
+ for (j = 0; j < SMMU_PTRS_PER_PMD; ++j) {
+ hisi_smmu_free_pmds_lpae(*pgd);
+ pmd++;
+ }
+ hisi_smmu_free_ptes_lpae(*pmd);
+ pgd++;
+ }
+ memset((void *)page_table_addr, 0, PAGE_SIZE);
+ spin_unlock_irqrestore(&hisi_smmu_dev->lock, flags);
+}
+
+static void hisi_smmu_domain_free_lpae(struct iommu_domain *domain)
+{
+ if (list_empty(&hisi_smmu_dev->domain_list))
+ hisi_smmu_free_pgtables_lpae((unsigned long *)
+ hisi_smmu_dev->va_pgtable_addr);
+
+ kfree(domain);
+
+}
+
+static int hisi_smmu_alloc_init_pte_lpae(smmu_pmd_t *ppmd,
+ unsigned long addr, unsigned long end,
+ unsigned long pfn, u64 prot, unsigned long *flags)
+{
+ smmu_pte_t *pte, *start;
+ pgtable_t table;
+ u64 pteval = SMMU_PTE_TYPE;
+
+ if (!smmu_pmd_none_lpae(*ppmd))
+ goto pte_ready;
+
+ /* Allocate a new set of tables */
+ spin_unlock_irqrestore(&hisi_smmu_dev->lock, *flags);
+ table = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_DMA);
+ spin_lock_irqsave(&hisi_smmu_dev->lock, *flags);
+ if (!table) {
+ dbg("%s: alloc page fail\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (smmu_pmd_none_lpae(*ppmd)) {
+ hisi_smmu_flush_pgtable_lpae(page_address(table),
+ SMMU_PAGE_SIZE);
+ smmu_pmd_populate_lpae(ppmd, table, SMMU_PMD_TYPE|SMMU_PMD_NS);
+ hisi_smmu_flush_pgtable_lpae(ppmd, sizeof(*ppmd));
+ } else
+ __free_page(table);
+
+pte_ready:
+ if (prot & IOMMU_SEC)
+ *ppmd &= (~SMMU_PMD_NS);
+
+ start = (smmu_pte_t *)smmu_pte_page_vaddr_lpae(ppmd)
+ + smmu_pte_index(addr);
+ pte = start;
+ if (!prot) {
+ pteval |= SMMU_PROT_NORMAL;
+ pteval |= SMMU_PTE_NS;
+ } else {
+ if (prot & IOMMU_DEVICE) {
+ pteval |= SMMU_PROT_DEVICE_nGnRE;
+ } else {
+ if (prot & IOMMU_CACHE)
+ pteval |= SMMU_PROT_NORMAL_CACHE;
+ else
+ pteval |= SMMU_PROT_NORMAL_NC;
+
+ if ((prot & IOMMU_READ) && (prot & IOMMU_WRITE))
+ pteval |= SMMU_PAGE_READWRITE;
+ else if ((prot & IOMMU_READ) && !(prot & IOMMU_WRITE))
+ pteval |= SMMU_PAGE_READONLY;
+ else
+				WARN(1, "read attribute not set!\n");
+
+ if (prot & IOMMU_EXEC) {
+ pteval |= SMMU_PAGE_READONLY_EXEC;
+ pteval &= ~(SMMU_PTE_PXN | SMMU_PTE_UXN);
+ }
+ }
+ if (prot & IOMMU_SEC)
+ pteval &= (~SMMU_PTE_NS);
+ else
+ pteval |= SMMU_PTE_NS;
+ }
+
+ do {
+ if (!pte_is_valid_lpae(pte))
+ *pte = (u64)(__pfn_to_phys(pfn)|pteval);
+ else
+ WARN_ONCE(1, "map to same VA more times!\n");
+ pte++;
+ pfn++;
+ addr += SMMU_PAGE_SIZE;
+ } while (addr < end);
+
+ hisi_smmu_flush_pgtable_lpae(start, sizeof(*pte) * (pte - start));
+ return 0;
+}
+
+static int hisi_smmu_alloc_init_pmd_lpae(smmu_pgd_t *ppgd,
+ unsigned long addr, unsigned long end,
+ unsigned long paddr, int prot, unsigned long *flags)
+{
+ int ret = 0;
+ smmu_pmd_t *ppmd, *start;
+ u64 next;
+ pgtable_t table;
+
+ if (!smmu_pgd_none_lpae(*ppgd))
+ goto pmd_ready;
+
+ /* Allocate a new set of tables */
+ spin_unlock_irqrestore(&hisi_smmu_dev->lock, *flags);
+ table = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_DMA);
+ spin_lock_irqsave(&hisi_smmu_dev->lock, *flags);
+ if (!table) {
+ dbg("%s: alloc page fail\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (smmu_pgd_none_lpae(*ppgd)) {
+ hisi_smmu_flush_pgtable_lpae(page_address(table),
+ SMMU_PAGE_SIZE);
+ smmu_pgd_populate_lpae(ppgd, table, SMMU_PGD_TYPE|SMMU_PGD_NS);
+ hisi_smmu_flush_pgtable_lpae(ppgd, sizeof(*ppgd));
+ } else
+ __free_page(table);
+
+pmd_ready:
+ if (prot & IOMMU_SEC)
+ *ppgd &= (~SMMU_PGD_NS);
+ start = (smmu_pmd_t *)smmu_pmd_page_vaddr_lpae(ppgd)
+ + smmu_pmd_index(addr);
+ ppmd = start;
+
+ do {
+ next = smmu_pmd_addr_end_lpae(addr, end);
+ ret = hisi_smmu_alloc_init_pte_lpae(ppmd,
+ addr, next, __phys_to_pfn(paddr), prot, flags);
+ if (ret)
+ goto error;
+ paddr += (next - addr);
+ addr = next;
+ } while (ppmd++, addr < end);
+error:
+ return ret;
+}
+
+int hisi_smmu_handle_mapping_lpae(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ int ret;
+ unsigned long end;
+ unsigned long next;
+ unsigned long flags;
+ smmu_pgd_t *pgd = (smmu_pgd_t *)hisi_smmu_dev->va_pgtable_addr;
+
+ if (!pgd) {
+ dbg("pgd is null\n");
+ return -EINVAL;
+ }
+ iova = ALIGN(iova, SMMU_PAGE_SIZE);
+ size = ALIGN(size, SMMU_PAGE_SIZE);
+ spin_lock_irqsave(&hisi_smmu_dev->lock, flags);
+ pgd += smmu_pgd_index(iova);
+ end = iova + size;
+ do {
+ next = smmu_pgd_addr_end_lpae(iova, end);
+ ret = hisi_smmu_alloc_init_pmd_lpae(pgd,
+ iova, next, paddr, prot, &flags);
+ if (ret)
+ goto out_unlock;
+ paddr += next - iova;
+ iova = next;
+ } while (pgd++, iova < end);
+out_unlock:
+ spin_unlock_irqrestore(&hisi_smmu_dev->lock, flags);
+ return ret;
+}
+
+static int hisi_smmu_map_lpae(struct iommu_domain *domain,
+ unsigned long iova,
+ phys_addr_t paddr, size_t size,
+ int prot)
+{
+ unsigned long max_iova;
+ struct iommu_domain_data *data;
+
+ if (!domain) {
+ dbg("domain is null\n");
+ return -ENODEV;
+ }
+ data = domain->priv;
+ max_iova = data->iova_start + data->iova_size;
+ if (iova < data->iova_start) {
+ dbg("iova failed: iova = 0x%lx, start = 0x%8x\n",
+ iova, data->iova_start);
+ goto error;
+ }
+ if ((iova+size) > max_iova) {
+ dbg("iova out of domain range, iova+size=0x%lx, end=0x%lx\n",
+ iova+size, max_iova);
+ goto error;
+ }
+ return hisi_smmu_handle_mapping_lpae(domain, iova, paddr, size, prot);
+error:
+ dbg("iova is not in this range\n");
+ return -EINVAL;
+}
+
+static unsigned int hisi_smmu_clear_pte_lpae(smmu_pgd_t *pmdp,
+ unsigned int iova, unsigned int end)
+{
+ smmu_pte_t *ptep = NULL;
+ smmu_pte_t *ppte = NULL;
+ unsigned int size = end - iova;
+
+ ptep = smmu_pte_page_vaddr_lpae(pmdp);
+ ppte = ptep + smmu_pte_index(iova);
+
+ if (!!size)
+ memset(ppte, 0x0, (size / SMMU_PAGE_SIZE) * sizeof(*ppte));
+
+ return size;
+}
+
+static unsigned int hisi_smmu_clear_pmd_lpae(smmu_pgd_t *pgdp,
+ unsigned int iova, unsigned int end)
+{
+ smmu_pmd_t *pmdp = NULL;
+ smmu_pmd_t *ppmd = NULL;
+ unsigned int next = 0;
+ unsigned int size = end - iova;
+
+ pmdp = smmu_pmd_page_vaddr_lpae(pgdp);
+ ppmd = pmdp + smmu_pmd_index(iova);
+ do {
+ next = smmu_pmd_addr_end_lpae(iova, end);
+ hisi_smmu_clear_pte_lpae(ppmd, iova, next);
+ iova = next;
+		dbg("%s: iova=0x%x, end=0x%x\n", __func__, iova, end);
+ } while (ppmd++, iova < end);
+
+ return size;
+}
+
+unsigned int hisi_smmu_handle_unmapping_lpae(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ smmu_pgd_t *pgdp = NULL;
+ unsigned int end = 0;
+ unsigned int next = 0;
+ unsigned int unmap_size = 0;
+ unsigned long flags;
+
+ iova = SMMU_PAGE_ALIGN(iova);
+ size = SMMU_PAGE_ALIGN(size);
+ pgdp = (smmu_pgd_t *)hisi_smmu_dev->va_pgtable_addr;
+ end = iova + size;
+ dbg("%s:end=0x%x\n", __func__, end);
+ pgdp += smmu_pgd_index(iova);
+ spin_lock_irqsave(&hisi_smmu_dev->lock, flags);
+ do {
+ next = smmu_pgd_addr_end_lpae(iova, end);
+ unmap_size += hisi_smmu_clear_pmd_lpae(pgdp, iova, next);
+ iova = next;
+ dbg("%s: pgdp=%p, iova=0x%lx\n", __func__, pgdp, iova);
+ } while (pgdp++, iova < end);
+
+ spin_unlock_irqrestore(&hisi_smmu_dev->lock, flags);
+ return unmap_size;
+}
+
+static size_t hisi_smmu_unmap_lpae(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ unsigned long max_iova;
+ unsigned int ret;
+ struct iommu_domain_data *data;
+
+ if (!domain) {
+ dbg("domain is null\n");
+ return -ENODEV;
+ }
+ data = domain->priv;
+	/* calculate the max io virtual address */
+ max_iova = data->iova_start + data->iova_size;
+ /*check the iova */
+ if (iova < data->iova_start)
+ goto error;
+ if ((iova+size) > max_iova) {
+ dbg("iova out of domain range, iova+size=0x%lx, end=0x%lx\n",
+ iova+size, max_iova);
+ goto error;
+ }
+ /*unmapping the range of iova*/
+ ret = hisi_smmu_handle_unmapping_lpae(domain, iova, size);
+ if (ret == size) {
+ dbg("%s:unmap size:0x%x\n", __func__, (unsigned int)size);
+ return size;
+ } else {
+ return 0;
+ }
+error:
+ dbg("%s:the range of io address is wrong\n", __func__);
+ return -EINVAL;
+}
+
+static phys_addr_t hisi_smmu_iova_to_phys_lpae(
+ struct iommu_domain *domain, dma_addr_t iova)
+{
+ smmu_pgd_t *pgdp, pgd;
+ smmu_pmd_t pmd;
+ smmu_pte_t pte;
+
+ pgdp = (smmu_pgd_t *)hisi_smmu_dev->va_pgtable_addr;
+ if (!pgdp)
+ return 0;
+
+ pgd = *(pgdp + smmu_pgd_index(iova));
+ if (smmu_pgd_none_lpae(pgd))
+ return 0;
+
+ pmd = *((smmu_pmd_t *)smmu_pmd_page_vaddr_lpae(&pgd) +
+ smmu_pmd_index(iova));
+ if (smmu_pmd_none_lpae(pmd))
+ return 0;
+
+ pte = *((u64 *)smmu_pte_page_vaddr_lpae(&pmd) + smmu_pte_index(iova));
+ if (smmu_pte_none_lpae(pte))
+ return 0;
+
+ return __pfn_to_phys(pte_pfn(__pte(pte))) | (iova & ~SMMU_PAGE_MASK);
+}
+
+static int hisi_attach_dev_lpae(struct iommu_domain *domain, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret = 0;
+ struct iommu_domain_data *iommu_info = NULL;
+
+ iommu_info = kzalloc(sizeof(struct iommu_domain_data), GFP_KERNEL);
+ if (!iommu_info) {
+ dbg("alloc iommu_domain_data fail\n");
+ return -EINVAL;
+ }
+ list_add(&iommu_info->list, &hisi_smmu_dev->domain_list);
+ domain->priv = iommu_info;
+ ret = get_domain_data_lpae(np, domain->priv);
+ return ret;
+}
+
+static void hisi_detach_dev_lpae(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct iommu_domain_data *data;
+
+ data = (struct iommu_domain_data *)domain->priv;
+ if (data) {
+ list_del(&data->list);
+ domain->priv = NULL;
+ kfree(data);
+ } else {
+		dbg("%s: error! the data entry has already been deleted\n", __func__);
+ }
+}
+
+static dma_addr_t get_phys_addr_lpae(struct scatterlist *sg)
+{
+ dma_addr_t dma_addr = sg_dma_address(sg);
+
+ if (!dma_addr)
+ dma_addr = sg_phys(sg);
+ return dma_addr;
+}
+
+int iommu_map_tile(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, size_t size, int prot,
+ struct tile_format *format)
+{
+ if (unlikely(!(domain->ops->map_tile)))
+ return -ENODEV;
+
+ BUG_ON(iova & (~PAGE_MASK));
+
+ return domain->ops->map_tile(domain, iova, sg, size, prot, format);
+}
+
+int iommu_unmap_tile(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
+{
+ if (unlikely(!(domain->ops->unmap_tile)))
+ return -ENODEV;
+
+ BUG_ON(iova & (~PAGE_MASK));
+
+ return domain->ops->unmap_tile(domain, iova, size);
+}
+
+/*
+ * iova: the start address for the tile mapping
+ * size: the physical memory size
+ * sg: the scatterlist node where the physical memory starts
+ * sg_offset: the offset inside that sg node where the physical memory starts
+ * prot: the page property of the virtual memory
+ * This function completes the mapping of one row.
+ */
+static size_t hisi_map_tile_row_lpae(struct iommu_domain *domain, unsigned long
+ iova, size_t size, struct scatterlist *sg, size_t sg_offset,
+ struct hisi_map_tile_position_lpae *map_position,
+ unsigned int prot){
+
+ unsigned long map_size; /*the memory size that will be mapped*/
+ unsigned long phys_addr;
+ unsigned long mapped_size = 0; /*memory size that has been mapped*/
+ int ret;
+
+ while (1) {
+		/*
+		 * Work out how much is left to map; if the current sg node
+		 * does not hold enough memory, map only what it has first.
+		 */
+ map_size = size - mapped_size;
+ if (map_size > (sg->length - sg_offset))
+ map_size = (sg->length - sg_offset);
+
+ /*get the start physical address*/
+ phys_addr = (unsigned long)get_phys_addr_lpae(sg) + sg_offset;
+ ret = hisi_smmu_map_lpae(domain,
+ iova + mapped_size, phys_addr, map_size, prot);
+ if (ret) {
+ dbg("[%s] hisi_smmu_map failed!\n", __func__);
+ break;
+ }
+ /*update mapped memory size*/
+ mapped_size += map_size;
+		/*
+		 * If the mapping is not finished yet, reset sg_offset and move
+		 * to the next sg node; otherwise update the offset within the
+		 * current node and save the position.
+		 */
+ if (mapped_size < size) {
+ sg_offset = 0;
+ sg = sg_next(sg);
+ if (!sg) {
+ dbg("[%s] phy memory not enough\n", __func__);
+ break;
+ }
+ } else {
+ sg_offset += map_size;
+			/* if the physical memory of this node is exhausted,
+			 * move on to the next node
+			 */
+ if (sg_offset == sg->length) {
+ sg_offset = 0;
+ sg = sg_next(sg);
+ }
+ break;
+ }
+ }
+ /*save current position*/
+ map_position->sg = sg;
+ map_position->offset = sg_offset;
+
+ return mapped_size;
+}
+
+/*
+ * domain: the iommu domain for the mapping
+ * iova: the start virtual address
+ * sg: the scatterlist of the physical memory
+ * size: the total size of all the virtual memory
+ * prot: the page table property of the virtual memory
+ * format: the parameters of the tile mapping
+ * This function maps physical memory in tile mode.
+ */
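+/*
+ * Worked example (hypothetical format values): with 4 KB pages,
+ * format->phys_page_line = 4 and format->virt_page_line = 8, each row maps
+ * 16 KB of physical memory while the virtual address advances in 32 KB
+ * strides, leaving the second half of every virtual row unmapped.
+ */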
+static int hisi_smmu_map_tile_lpae(struct iommu_domain *domain,
+ unsigned long iova,
+ struct scatterlist *sg, size_t size, int prot,
+ struct tile_format *format){
+
+ unsigned int phys_length;
+ struct scatterlist *sg_node;
+ unsigned int row_number, row;
+ unsigned int size_virt, size_phys;
+ unsigned int sg_offset;
+ int ret = size;
+ unsigned int mapped_size, header_size;
+ struct hisi_map_tile_position_lpae map_position;
+
+ /* calculate the whole length of phys mem */
+ for (phys_length = 0, sg_node = sg; sg_node; sg_node = sg_next(sg_node))
+ phys_length += ALIGN(sg_node->length, PAGE_SIZE);
+
+ header_size = format->header_size;
+
+	/* calculate the number of rows */
+ row_number = ((phys_length - header_size) >> PAGE_SHIFT)
+ / format->phys_page_line;
+ dbg("phys_length: 0x%x, rows: 0x%x, header_size: 0x%x\n",
+ phys_length, row_number, header_size);
+
+	/* calculate the physical and virtual memory needed for one row */
+ size_phys = (format->phys_page_line * PAGE_SIZE);
+ size_virt = (format->virt_page_line * PAGE_SIZE);
+
+ sg_offset = 0;
+ sg_node = sg;
+
+ /*set start position*/
+ map_position.sg = sg;
+ map_position.offset = 0;
+
+ /*map header*/
+ if (header_size) {
+ mapped_size = hisi_map_tile_row_lpae(domain, iova,
+ header_size, sg_node,
+ sg_offset, &map_position,
+ prot);
+ if (mapped_size != header_size) {
+ WARN(1, "map head fail\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ iova += ALIGN(header_size, size_virt);
+ }
+ /* map row by row */
+ for (row = 0; row < row_number; row++) {
+ /* get physical memory position */
+ if (map_position.sg) {
+ sg_node = map_position.sg;
+ sg_offset = map_position.offset;
+ } else {
+ dbg("[%s]:physical memory is not enough\n", __func__);
+ break;
+ }
+ /* map one row*/
+ mapped_size = hisi_map_tile_row_lpae(domain,
+ iova + (size_virt * row),
+ size_phys, sg_node, sg_offset,
+ &map_position, prot);
+ if (mapped_size != size_phys) {
+ WARN(1, "hisi_map_tile_row failed!\n");
+ ret = -EINVAL;
+ break;
+ }
+	}
+error:
+ return ret;
+}
+
+static size_t hisi_smmu_unmap_tile_lpae(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ return hisi_smmu_unmap_lpae(domain, iova, size);
+}
+
+size_t hisi_iommu_map_sg_lpae(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ struct scatterlist *s;
+ size_t mapped = 0;
+ unsigned int i, min_pagesz;
+ int ret;
+
+ if (domain->ops->pgsize_bitmap == 0UL)
+ return 0;
+
+ min_pagesz = (unsigned int)1 << __ffs(domain->ops->pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ /*
+ * We are mapping on IOMMU page boundaries, so offset within
+ * the page must be 0. However, the IOMMU may support pages
+ * smaller than PAGE_SIZE, so s->offset may still represent
+ * an offset of that boundary within the CPU page.
+ */
+ if (!IS_ALIGNED(s->offset, min_pagesz))
+ goto out_err;
+
+ ret = hisi_smmu_map_lpae(domain, iova + mapped, phys,
+ (size_t)s->length, prot);
+ if (ret)
+ goto out_err;
+
+ mapped += s->length;
+ }
+
+ return mapped;
+
+out_err:
+ /* undo mappings already done */
+ hisi_smmu_unmap_lpae(domain, iova, mapped);
+
+ return 0;
+}
+
+static struct iommu_ops hisi_smmu_ops = {
+ .domain_alloc = hisi_smmu_domain_alloc_lpae,
+ .domain_free = hisi_smmu_domain_free_lpae,
+ .map = hisi_smmu_map_lpae,
+ .unmap = hisi_smmu_unmap_lpae,
+ .map_sg = hisi_iommu_map_sg_lpae,
+ .attach_dev = hisi_attach_dev_lpae,
+ .detach_dev = hisi_detach_dev_lpae,
+ .iova_to_phys = hisi_smmu_iova_to_phys_lpae,
+ .pgsize_bitmap = SMMU_PAGE_SIZE,
+ .map_tile = hisi_smmu_map_tile_lpae,
+ .unmap_tile = hisi_smmu_unmap_tile_lpae,
+};
+
+static int hisi_smmu_probe_lpae(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dbg("enter %s\n", __func__);
+	hisi_smmu_dev = devm_kzalloc(dev,
+			sizeof(struct hisi_smmu_device_lpae), GFP_KERNEL);
+	if (!hisi_smmu_dev)
+		goto smmu_device_error;
+
+	hisi_smmu_dev->smmu_pgd = devm_kzalloc(dev, SZ_64, GFP_KERNEL | __GFP_DMA);
+	if (!hisi_smmu_dev->smmu_pgd)
+		goto smmu_device_error;
+
+	hisi_smmu_dev->dev = dev;
+	INIT_LIST_HEAD(&hisi_smmu_dev->domain_list);
+	spin_lock_init(&hisi_smmu_dev->lock);
+
+	hisi_smmu_dev->smmu_pgd = (smmu_pgd_t *)(ALIGN((unsigned long)(hisi_smmu_dev->smmu_pgd), SZ_32));
+
+ hisi_smmu_dev->smmu_phy_pgtable_addr =
+ virt_to_phys(hisi_smmu_dev->smmu_pgd);
+ printk(KERN_ERR "%s, smmu_phy_pgtable_addr is = %llx\n", __func__, hisi_smmu_dev->smmu_phy_pgtable_addr);
+
+ hisi_smmu_dev->va_pgtable_addr = (unsigned long)(hisi_smmu_dev->smmu_pgd);
+ bus_set_iommu(&platform_bus_type, &hisi_smmu_ops);
+ return 0;
+
+smmu_device_error:
+ return -ENOMEM;
+}
+
+static int hisi_smmu_remove_lpae(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id hisi_smmu_of_match_lpae[] = {
+ { .compatible = "hisi,hisi-smmu-lpae"},
+ { },
+};
+MODULE_DEVICE_TABLE(of, hisi_smmu_of_match_lpae);
+
+static struct platform_driver hisi_smmu_driver_lpae = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hisi-smmu-lpae",
+ .of_match_table = of_match_ptr(hisi_smmu_of_match_lpae),
+ },
+ .probe = hisi_smmu_probe_lpae,
+ .remove = hisi_smmu_remove_lpae,
+};
+
+static int __init hisi_smmu_init_lpae(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&hisi_smmu_driver_lpae);
+ return ret;
+}
+
+static void __exit hisi_smmu_exit_lpae(void)
+{
+ return platform_driver_unregister(&hisi_smmu_driver_lpae);
+}
+
+subsys_initcall(hisi_smmu_init_lpae);
+module_exit(hisi_smmu_exit_lpae);
+
+MODULE_DESCRIPTION("IOMMU API for HI3660 architected SMMU implementations");
+MODULE_AUTHOR("huawei hisilicon company");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/hisilicon/Kconfig b/drivers/iommu/hisilicon/Kconfig
new file mode 100644
index 000000000000..e760c7e31d6e
--- /dev/null
+++ b/drivers/iommu/hisilicon/Kconfig
@@ -0,0 +1,40 @@
+# Hisilicon IOMMU support
+
+config HISI_IODOMAIN_API
+ bool
+
+config HISI_IOMMU
+ bool "Hisilicon IOMMU Support"
+ select IOMMU_API
+ select HISI_IODOMAIN_API
+ help
+ Hisilicon IOMMU Support.
+
+config HISI_IOMMU_LPAE
+ bool "Hisilicon IOMMU LPAE Support"
+ select IOMMU_API
+ depends on HISI_IOMMU
+ help
+	  Support for the Hisilicon IOMMU using the LPAE (long-descriptor)
+	  page table format.
+
+config HISI_IOMMU_LEGACY
+ bool "Hisilicon IOMMU SECOND LEVEL PAGE TABLE Support"
+ select IOMMU_API
+ depends on HISI_IOMMU && !HISI_IOMMU_LPAE
+ help
+	  Support for the Hisilicon IOMMU using the legacy second-level
+	  page table format.
+
+config HISI_IOMMU_COMPACT
+	bool "Hisilicon IOMMU compatibility with Linux 4.1"
+	help
+	  The IOMMU framework changed in Linux 4.1, so the Hisilicon IOMMU
+	  code needs a small compatibility layer there. Say 'y' if you are
+	  building against Linux 4.1 and need the Hisilicon IOMMU;
+	  otherwise say 'n'.
+
+config HISI_IOMMU_TEST
+ bool "Hisilicon IOMMU TEST Support"
+ depends on HISI_IOMMU_LPAE
+ select IOMMU_API
+ help
+	  Test driver exercising the Hisilicon IOMMU LPAE map/unmap paths.
diff --git a/drivers/iommu/hisilicon/Makefile b/drivers/iommu/hisilicon/Makefile
new file mode 100644
index 000000000000..3971520fdc91
--- /dev/null
+++ b/drivers/iommu/hisilicon/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HISI_IOMMU_LEGACY) += hisi_smmu.o
+obj-$(CONFIG_HISI_IODOMAIN_API) += ion-iommu-map.o
+obj-$(CONFIG_HISI_IOMMU_LPAE) += hisi_smmu_lpae.o
+obj-$(CONFIG_HISI_IOMMU_TEST) += hisi_smmu_test.o
+obj-$(CONFIG_HISI_IOMMU_TEST) += hisi_smmu_unittest.o
diff --git a/drivers/iommu/hisilicon/hisi_smmu_test.c b/drivers/iommu/hisilicon/hisi_smmu_test.c
new file mode 100644
index 000000000000..8ed884c64c52
--- /dev/null
+++ b/drivers/iommu/hisilicon/hisi_smmu_test.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2013-2013 ...
+ * ...
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/hisi/hisi_ion.h>
+#include <linux/hisi/ion-iommu.h>
+#include <linux/hisi/hisi-iommu.h>
+#include <linux/of.h>
+
+/*#define IOMMU_DEBUG*/
+#ifdef IOMMU_DEBUG
+#define D(format, arg...) \
+ do {\
+ printk(KERN_ERR "[iommutest] " format, ##arg);\
+ } while (0)
+#else
+#define D(format, arg...)
+#endif
+#undef IOMMU_DEBUG
+
+
+static struct sg_table *table;
+
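+/* test scenarios driven by smmu_test_main() */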
+enum {
+ IOMMU_TEST,
+ ION_TEST,
+ TILE_TEST
+};
+
+struct smmu_tester {
+ struct ion_handle *ion_handle;
+ struct ion_client *ion_client;
+ struct iommu_domain *domain;
+};
+
+struct iommu_page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct iommu_domain_data *info;
+struct smmu_tester *smmu_tester;
+
+/*
+ * iova: the start IO virtual address.
+ * size: the size of the IO virtual memory.
+ * This function unmaps a section of IO virtual memory.
+ */
+static int iommu_unmap_test(unsigned int iova, unsigned int size)
+{
+ int ret;
+ if (!smmu_tester->domain) {
+ D("domain is null");
+ return -EINVAL;
+ }
+ /* iommu unmap */
+ ret = iommu_unmap(smmu_tester->domain, iova, size);
+ if (!ret) {
+ D("hisi iommu unmap domain failed!\n");
+ }
+ return ret;
+}
+
+/* this function performs the iommu map */
+ssize_t iommu_map_test(unsigned int start, unsigned int size)
+{
+ unsigned int iova_start = 0;
+ unsigned int map_size = 0;
+ unsigned int i;
+	struct scatterlist *sg = NULL;
+ ktime_t time_start, time_end;
+ unsigned long diff;
+ int ret = 0;
+
+ /*get parameter from buffer*/
+ iova_start = start;
+ map_size = size;
+
+ D("%s: iova_start: 0x%x, size: 0x%x ", __func__, iova_start, map_size);
+ time_start = ktime_get();
+
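+	/* walk the global sg table and map each chunk back-to-back from iova_start */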
+ if (smmu_tester->domain && table) {
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			ret = iommu_map(smmu_tester->domain, iova_start,
+					page_to_phys(sg_page(sg)), sg->length,
+					IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+			if (ret) {
+				D("failed to map devmem: %d\n", ret);
+ goto out;
+ }
+ iova_start += sg->length;
+ }
+ }
+
+ time_end = ktime_get();
+	diff = ktime_to_us(ktime_sub(time_end, time_start));
+	D("%s: mapping took %lu us\n", __func__, diff);
+ return size;
+
+out:
+ return -ENOSPC;
+}
+
+/* get the physical address corresponding to an IO virtual address */
+ssize_t iova_to_phy(void)
+{
+ unsigned int iova_addr = 0;
+ unsigned int phys_addr;
+
+ phys_addr = iommu_iova_to_phys(smmu_tester->domain, iova_addr);
+ D("iova_addr=0x%x and phys_addr=0x%x\n", iova_addr, phys_addr);
+ return 0;
+}
+
+/*free physical memory*/
+static int free_memory(void)
+{
+ int i;
+ struct scatterlist *sg = NULL;
+ unsigned int mem_size = 0;
+ if (table) {
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ __free_pages(sg_page(sg), get_order(sg->length));
+ mem_size += sg->length;
+ }
+ D("%s:free total memory 0x%x \n", __func__, mem_size);
+ sg_free_table(table);
+ kfree(table);
+ }
+ table = NULL;
+ return 0;
+}
+
+/*
+ * This function allocates physical memory and builds a scatterlist
+ * from it; the resulting sg_table is stored in the global 'table'.
+ */
+static struct iommu_page_info *create_node(void)
+{
+ struct iommu_page_info *info = NULL;
+	struct page *page = NULL;
+ info = kmalloc(sizeof(struct iommu_page_info), GFP_KERNEL);
+ if (!info) {
+ D("%s: kmalloc info failed!\n", __func__);
+ return NULL;
+ }
+	/* allocate one order-1 (8KB) chunk per node */
+	page = alloc_pages(GFP_KERNEL, 1);
+ if (!page) {
+ D("alloc page error \n");
+ kfree(info);
+ return NULL;
+ }
+ info->page = page;
+	info->order = 1;	/* matches the order-1 allocation above */
+ INIT_LIST_HEAD(&info->list);
+ return info;
+}
+
+static int alloc_memory(unsigned int size)
+{
+ int map_size = 0;
+ unsigned int sum = 0;
+ struct list_head pages;
+ struct iommu_page_info *info, *tmp_info;
+ unsigned int i = 0, ret = 0;
+ struct scatterlist *sg = NULL;
+
+ INIT_LIST_HEAD(&pages);
+ map_size = size;
+
+ if (map_size < 0)
+ return -EINVAL;
+ D("%s: map_size=0x%x \n", __func__, map_size);
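+	/* allocate chunks until at least map_size bytes have been gathered */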
+ do {
+ info = create_node();
+ if (!info)
+ goto error;
+ list_add_tail(&info->list, &pages);
+		sum += (1 << info->order) * PAGE_SIZE;
+ i++;
+ } while (sum < map_size);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ goto error;
+ }
+
+	ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ goto error;
+ }
+ sg = table->sgl;
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		struct page *page = info->page;
+
+		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ list_del(&info->list);
+ kfree(info);
+ }
+ D("sglist is ok \n");
+ return map_size;
+error:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ list_del(&info->list);
+ kfree(info);
+ }
+ return 0;
+}
+/*
+ *test mapping address from ion device
+ */
+int test_smmu_ion_tile_map(unsigned int global_map_start, unsigned int global_map_size)
+{
+ int ret;
+ struct iommu_map_format format = {0};
+ format.iova_size = global_map_size;
+ format.phys_page_line = 60;
+ format.virt_page_line = 64;
+ format.is_tile = 0x01;
+ format.prot = 0xff;
+
+	smmu_tester->ion_handle = ion_alloc(smmu_tester->ion_client,
+			global_map_size, SZ_4K,
+			ION_HEAP(ION_SYSTEM_HEAP_ID), 0x0);
+	if (IS_ERR(smmu_tester->ion_handle)) {
+		D("ion_alloc failed\n");
+ return 0;
+ }
+
+ ret = ion_map_iommu(smmu_tester->ion_client, smmu_tester->ion_handle, &format);
+	if (ret) {
+		D("ion map iommu failed\n");
+		return 0;
+ }
+ D("%s end\n", __func__);
+ return format.iova_start;
+}
+
+/*
+ * test unmapping address from ion device
+ */
+static int test_smmu_ion_tile_unmap(void)
+{
+	D("%s start\n", __func__);
+ ion_unmap_iommu(smmu_tester->ion_client, smmu_tester->ion_handle);
+ return 0;
+}
+
+int test_smmu_ion_map(unsigned int global_map_start, unsigned int global_map_size)
+{
+ int ret;
+ unsigned int size = global_map_size;
+ struct iommu_map_format format = {0};
+ unsigned int align = SZ_4K;
+ format.iova_size = size;
+ format.prot = 0xff;
+	smmu_tester->ion_handle = ion_alloc(smmu_tester->ion_client, size,
+			align, ION_HEAP(ION_SYSTEM_HEAP_ID), 0x0);
+	if (IS_ERR(smmu_tester->ion_handle)) {
+		pr_err("ion_alloc failed\n");
+ return 0;
+ }
+ ret = ion_map_iommu(smmu_tester->ion_client, smmu_tester->ion_handle, &format);
+ if (ret) {
+		pr_err("ion_map_iommu failed\n");
+ return 0;
+ }
+ return format.iova_start;
+}
+
+int test_smmu_ion_unmap(void)
+{
+ D("%s start\n", __func__);
+ ion_unmap_iommu(smmu_tester->ion_client, smmu_tester->ion_handle);
+ return 1;
+}
+
+extern void smmu_print_pgtable(void);
+extern void set_smmu_param(unsigned int start, unsigned int size);
+ssize_t smmu_test_main(int type, unsigned int global_map_size, unsigned int global_map_start)
+{
+ unsigned int ret = 0;
+
+ ret = alloc_memory(global_map_size);
+ if (!ret) {
+ D("ret = %d\n", ret);
+ return -ENOMEM;
+ }
+ switch (type) {
+ case IOMMU_TEST:
+ iommu_map_test(global_map_start, global_map_size);
+ set_smmu_param(global_map_start, global_map_size);
+ smmu_print_pgtable();
+ iommu_unmap_test(global_map_start, global_map_size);
+ smmu_print_pgtable();
+ break;
+ case ION_TEST:
+ ret = test_smmu_ion_map(global_map_start, global_map_size);
+ set_smmu_param(ret, global_map_size);
+ smmu_print_pgtable();
+ test_smmu_ion_unmap();
+ smmu_print_pgtable();
+ break;
+ case TILE_TEST:
+ ret = test_smmu_ion_tile_map(global_map_start, global_map_size);
+ set_smmu_param(ret, global_map_size);
+ smmu_print_pgtable();
+ test_smmu_ion_tile_unmap();
+ smmu_print_pgtable();
+ break;
+	default:
+		return -EINVAL;
+	}
+ free_memory();
+ return 0;
+}
+
+static int hisi_iommutest_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ smmu_tester = devm_kzalloc(&pdev->dev, sizeof(struct smmu_tester), GFP_KERNEL);
+ if (!smmu_tester) {
+ D("in %s devm_kzalloc is failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ smmu_tester->ion_client = hisi_ion_client_create("smmu_test");
+ if (IS_ERR(smmu_tester->ion_client)) {
+ D("hisi ion client create failed \n");
+ return -ENODEV;
+ }
+ smmu_tester->domain = iommu_domain_alloc(dev->bus);
+ if (!smmu_tester->domain) {
+ D("create domain fail \n");
+ return -ENOMEM;
+ } else {
+ iommu_attach_device(smmu_tester->domain, dev);
+ info = (struct iommu_domain_data *)smmu_tester->domain->priv;
+ D("%s,iova_start=0x%lx,iova_size=0x%lx \n", __func__,
+ info->iova_start,
+ info->iova_size);
+ }
+ return 0;
+}
+
+
+static const struct of_device_id hisi_smmu_of_table[] = {
+ { .compatible = "hisi,hisi-smmu-tester"},
+ { },
+};
+MODULE_DEVICE_TABLE(of, hisi_smmu_of_table);
+
+static struct platform_driver hisi_iommutest_drv = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hisi-smmu-tester",
+ .of_match_table = of_match_ptr(hisi_smmu_of_table),
+ },
+ .probe = hisi_iommutest_probe,
+};
+
+/* module init for the iommu test driver */
+static int __init init_iommu_test(void)
+{
+	int err;
+
+	err = platform_driver_register(&hisi_iommutest_drv);
+	if (err)
+		D("register device error\n");
+
+	return err;
+}
+module_init(init_iommu_test);
+
diff --git a/drivers/iommu/ion-iommu-map.c b/drivers/iommu/ion-iommu-map.c
new file mode 100644
index 000000000000..52aa9a7bf699
--- /dev/null
+++ b/drivers/iommu/ion-iommu-map.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2013 hisilicon. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/debugfs.h>
+#include <linux/genalloc.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/hisi/hisi-iommu.h>
+#include <linux/hisi/ion-iommu.h>
+
+#ifdef IOMMU_DEBUG
+#define dbg(format, arg...) \
+ pr_info("[hisi_iommu_domain]"format, ##arg)
+#else
+#define dbg(format, arg...)
+#endif
+
+struct map_result {
+ unsigned long iova_start;
+ unsigned long iova_size;
+ unsigned long iommu_ptb_base;
+ unsigned long iommu_iova_base;
+ unsigned long is_tile;
+};
+
+struct iommu_domain_data *domain_info;
+static struct hisi_iommu_domain *m_hisi_domain;
+DEFINE_MUTEX(iova_pool_mutex);
+
+static unsigned long hisi_alloc_iova(struct gen_pool *pool,
+ unsigned long size, unsigned long align)
+{
+ unsigned long iova = 0;
+
+ mutex_lock(&iova_pool_mutex);
+
+ iova = gen_pool_alloc(pool, size);
+ if (!iova) {
+ mutex_unlock(&iova_pool_mutex);
+ pr_err("hisi iommu gen_pool_alloc failed! size = %lu\n", size);
+ return 0;
+ }
+
+ if (align > (1 << pool->min_alloc_order))
+ WARN(1, "hisi iommu domain cant align to 0x%lx\n", align);
+
+ mutex_unlock(&iova_pool_mutex);
+ return iova;
+}
+
+static void hisi_free_iova(struct gen_pool *pool,
+ unsigned long iova, size_t size)
+{
+ mutex_lock(&iova_pool_mutex);
+ gen_pool_free(pool, iova, size);
+
+ mutex_unlock(&iova_pool_mutex);
+}
+
+unsigned long hisi_iommu_alloc_iova(size_t size, unsigned long align)
+{
+ struct hisi_iommu_domain *hisi_domain = m_hisi_domain;
+
+ return hisi_alloc_iova(hisi_domain->iova_pool, size, align);
+}
+EXPORT_SYMBOL_GPL(hisi_iommu_alloc_iova);
+
+void hisi_iommu_free_iova(unsigned long iova, size_t size)
+{
+ int ret;
+ struct hisi_iommu_domain *hisi_domain = m_hisi_domain;
+
+ ret = addr_in_gen_pool(hisi_domain->iova_pool, iova, size);
+	if (!ret) {
+ pr_err("%s:illegal para!!iova = %lx, size = %lx\n",
+ __func__, iova, size);
+ }
+ hisi_free_iova(hisi_domain->iova_pool, iova, size);
+}
+EXPORT_SYMBOL_GPL(hisi_iommu_free_iova);
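+
+/*
+ * Illustrative pairing of the two helpers above (not part of this patch;
+ * the SZ_256K alignment is only an example value):
+ *
+ *	iova = hisi_iommu_alloc_iova(size, SZ_256K);
+ *	if (iova) {
+ *		... map and use the [iova, iova + size) range ...
+ *		hisi_iommu_free_iova(iova, size);
+ *	}
+ */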
+
+static struct gen_pool *iova_pool_setup(unsigned long start,
+ unsigned long size, unsigned long align)
+{
+ struct gen_pool *pool = NULL;
+ int ret = 0;
+
+ pool = gen_pool_create(order_base_2(align), -1);/*lint !e666 */
+ if (!pool) {
+ pr_err("Create gen pool failed!\n");
+ return NULL;
+ }
+	/*
+	 * The iova start must not be 0: an allocation returning 0 is
+	 * treated as an error.
+	 */
+ if (!start)
+ WARN(1, "iova start should not be 0!\n");
+
+ ret = gen_pool_add(pool, start, size, -1);
+ if (ret) {
+ pr_err("Gen pool add failed!\n");
+ gen_pool_destroy(pool);
+ return NULL;
+ }
+
+ return pool;
+}
+
+
+static void iova_pool_destroy(struct gen_pool *pool)
+{
+ gen_pool_destroy(pool);
+}
+
+static int do_iommu_domain_map(struct hisi_iommu_domain *hisi_domain,
+ struct scatterlist *sgl, struct iommu_map_format *format,
+ struct map_result *result)
+{
+ int ret;
+ unsigned long phys_len, iova_size;
+ unsigned long iova_start;
+
+ struct gen_pool *pool;
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ struct tile_format fmt;
+ /* calculate whole phys mem length */
+ for (phys_len = 0, sg = sgl; sg; sg = sg_next(sg))
+ phys_len += (unsigned long)ALIGN(sg->length, PAGE_SIZE);
+
+ /* get io virtual address size */
+ if (format->is_tile) {
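+		/*
+		 * In tile mode every row of virt_page_line pages of iova is
+		 * backed by only phys_page_line pages of memory, so the iova
+		 * window is larger than the physical length.
+		 */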
+ unsigned long lines;
+ unsigned long body_size;
+
+ body_size = phys_len - format->header_size;
+ lines = body_size / (format->phys_page_line * PAGE_SIZE);
+
+ /*header need more lines virtual space*/
+ if (format->header_size) {
+ unsigned long header_size;
+
+ header_size = ALIGN(format->header_size,
+ format->virt_page_line * PAGE_SIZE);
+ lines += header_size
+ / (format->virt_page_line * PAGE_SIZE);
+ }
+
+ iova_size = lines * format->virt_page_line * PAGE_SIZE;
+ } else {
+ iova_size = phys_len;
+ }
+
+ /* alloc iova */
+ pool = hisi_domain->iova_pool;
+ domain = hisi_domain->domain;
+ iova_start = hisi_alloc_iova(pool, iova_size, hisi_domain->range.align);
+ if (!iova_start) {
+ pr_err("[%s]hisi_alloc_iova alloc size 0x%lx failed!"
+ "hisi ion pool avail 0x%lx\n",
+ __func__, iova_size, gen_pool_avail(pool));
+ return -EINVAL;
+ }
+
+ if (0x100000000 < (iova_start + iova_size)) {
+ pr_err("hisi iommu can not deal with iova 0x%lx size 0x%lx\n",
+ iova_start, iova_size);
+ }
+
+ /* do map */
+ if (format->is_tile) {
+ fmt.is_tile = format->is_tile;
+ fmt.phys_page_line = format->phys_page_line;
+ fmt.virt_page_line = format->virt_page_line;
+ fmt.header_size = format->header_size;
+ ret = iommu_map_tile(domain, iova_start,
+ sgl, iova_size, 0, &fmt);
+ } else {
+ ret = iommu_map_sg(domain, iova_start, sgl,
+ sg_nents(sgl), format->prot);
+ }
+
+ if (ret != iova_size) {
+ pr_err("[%s]map failed!iova_start = %lx, iova_size = %lx\n",
+ __func__, iova_start, iova_size);
+ hisi_free_iova(pool, iova_start, iova_size);
+ return ret;
+ }
+
+	/* output the result */
+ result->iova_start = iova_start;
+ result->iova_size = iova_size;
+
+ return 0;
+}
+
+int hisi_iommu_map_domain(struct scatterlist *sgl,
+ struct iommu_map_format *format)
+{
+ int ret = 0;
+ struct map_result result;
+ struct hisi_iommu_domain *hisi_domain;
+
+ hisi_domain = m_hisi_domain;
+
+ memset(&result, 0, sizeof(result));
+
+ ret = do_iommu_domain_map(hisi_domain, sgl, format, &result);
+ if (ret) {
+ dbg("alloc iova fail\n");
+ return ret;
+ }
+ format->iova_start = result.iova_start;
+ format->iova_size = result.iova_size;
+
+	/* get the value which is written into the iommu register */
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_iommu_map_domain);
+
+static int do_iommu_domain_unmap(struct map_result *result)
+{
+ int ret;
+ unsigned long unmaped_size;
+ struct hisi_iommu_domain *hisi_domain = m_hisi_domain;
+ struct gen_pool *pool = hisi_domain->iova_pool;
+
+ /* never unmap a zero length address space */
+ if (!result->iova_size) {
+ pr_err("[%s]unmap failed! iova_start=%lx, iova_size=%lu\n",
+ __func__, result->iova_start, result->iova_size);
+ return -EINVAL;
+ }
+
+	/* unmapping a tile is equivalent to unmapping the range */
+ if (result->is_tile) {
+ unmaped_size = iommu_unmap_tile(hisi_domain->domain,
+ result->iova_start, result->iova_size);
+ } else {
+ unmaped_size = iommu_unmap(hisi_domain->domain,
+ result->iova_start, result->iova_size);
+ }
+
+ if (unmaped_size != result->iova_size) {
+ dbg("[%s]unmap failed!\n", __func__);
+ return -EINVAL;
+ }
+ /* free iova */
+ if (pool) {
+ ret = addr_in_gen_pool(pool, result->iova_start,
+ result->iova_size);
+		if (!ret) {
+ pr_err("[%s]illegal para!!iova = %lx, size = %lx\n",
+ __func__, result->iova_start, result->iova_size);
+ }
+ hisi_free_iova(pool, result->iova_start, result->iova_size);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_ARM64_64K_PAGES
+#error hisi iommu can not deal with 64k pages!
+#endif
+
+/**
+ * Called by ION
+ */
+int hisi_iommu_unmap_domain(struct iommu_map_format *format)
+{
+ struct map_result result;
+
+ result.iova_start = format->iova_start;
+ result.iova_size = format->iova_size;
+ result.is_tile = format->is_tile;
+
+ return do_iommu_domain_unmap(&result);
+}
+EXPORT_SYMBOL_GPL(hisi_iommu_unmap_domain);
+
+/* only used for testing */
+phys_addr_t hisi_iommu_domain_iova_to_phys(unsigned long iova)
+{
+ struct iommu_domain *domain;
+ domain = m_hisi_domain->domain;
+ return iommu_iova_to_phys(domain, iova);
+}
+EXPORT_SYMBOL_GPL(hisi_iommu_domain_iova_to_phys);
+
+int hisi_ion_enable_iommu(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct hisi_iommu_domain *hisi_domain;
+
+ pr_info("in %s start\n", __func__);
+ hisi_domain = kzalloc(sizeof(*hisi_domain), GFP_KERNEL);
+ if (!hisi_domain) {
+ dbg("alloc hisi_domain object fail\n");
+ return -ENOMEM;
+ }
+
+ if (!iommu_present(dev->bus)) {
+ dbg("iommu not found\n");
+ kfree(hisi_domain);
+ return 0;
+ }
+
+ /* create iommu domain */
+ hisi_domain->domain = iommu_domain_alloc(dev->bus);
+ if (!hisi_domain->domain) {
+ ret = -EINVAL;
+ goto error;
+ }
+ iommu_attach_device(hisi_domain->domain, dev);
+ domain_info = (struct iommu_domain_data *)hisi_domain->domain->priv;
+
+ /**
+ * Current align is 256K
+ */
+ hisi_domain->iova_pool = iova_pool_setup(domain_info->iova_start,
+ domain_info->iova_size, domain_info->iova_align);
+ if (!hisi_domain->iova_pool) {
+ ret = -EINVAL;
+ goto error;
+ }
+ /* this is a global pointer */
+ m_hisi_domain = hisi_domain;
+
+ dbg("in %s end\n", __func__);
+ return 0;
+
+error:
+ WARN(1, "hisi_iommu_domain_init failed!\n");
+ if (hisi_domain->iova_pool)
+ iova_pool_destroy(hisi_domain->iova_pool);
+ if (hisi_domain->domain)
+ iommu_domain_free(hisi_domain->domain);
+ kfree(hisi_domain);
+
+ return ret;
+}
+EXPORT_SYMBOL(hisi_ion_enable_iommu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d6c404b3584d..48bf938862c2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -290,6 +290,13 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
return 0;
}
+static int gic_irq_retrigger(struct irq_data *d)
+{
+ gic_poke_irq(d, GIC_DIST_PENDING_SET);
+
+ return 1;
+}
+
static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
@@ -414,6 +421,7 @@ static struct irq_chip gic_chip = {
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
+ .irq_retrigger = gic_irq_retrigger,
.irq_set_type = gic_set_type,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe8a4cb..5f29338637d3 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -116,6 +116,12 @@ config HI6220_MBOX
between application processors and MCU. Say Y here if you want to
build Hi6220 mailbox controller driver.
+config HI3660_MBOX
+ tristate "Hi3660 Mailbox"
+ depends on ARCH_HISI
+ help
+ Mailbox implementation for Hi3660.
+
config MAILBOX_TEST
tristate "Mailbox Test Client"
depends on OF
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed8fea9..27ee42aa59cb 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_HI3660_MBOX) += hi3660-mailbox.o
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
new file mode 100644
index 000000000000..14f469dc5797
--- /dev/null
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -0,0 +1,688 @@
+/*
+ * Hisilicon's Hi3660 mailbox driver
+ *
+ * Copyright (c) 2017 Hisilicon Limited.
+ * Copyright (c) 2017 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "mailbox.h"
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 2 words */
+#define MBOX_MSG_LEN 2
+
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_SRC_REG(m) MBOX_OFF(m)
+#define MBOX_DST_REG(m) (MBOX_OFF(m) + 0x04)
+#define MBOX_DCLR_REG(m) (MBOX_OFF(m) + 0x08)
+#define MBOX_DSTAT_REG(m) (MBOX_OFF(m) + 0x0C)
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x10)
+#define MBOX_IMASK_REG(m) (MBOX_OFF(m) + 0x14)
+#define MBOX_ICLR_REG(m) (MBOX_OFF(m) + 0x18)
+#define MBOX_SEND_REG(m) (MBOX_OFF(m) + 0x1C)
+#define MBOX_DATA_REG(m, i) (MBOX_OFF(m) + 0x20 + ((i) << 2))
+
+#define MBOX_CPU_IMASK(cpu) (((cpu) << 3) + 0x800)
+#define MBOX_CPU_IRST(cpu) (((cpu) << 3) + 0x804)
+#define MBOX_IPC_LOCK (0xA00)
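+
+/*
+ * Each mailbox slot has a 0x40-byte register window (source, destination,
+ * mode, interrupt mask/clear, send trigger and data words); the per-CPU
+ * interrupt mask/reset registers live from offset 0x800 upwards.
+ */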
+
+#define MBOX_IPC_UNLOCKED 0x00000000
+#define AUTOMATIC_ACK_CONFIG (1 << 0)
+#define NO_FUNC_CONFIG (0 << 0)
+
+#define MBOX_MANUAL_ACK 0
+#define MBOX_AUTO_ACK 1
+
+#define MBOX_STATE_IDLE (1 << 4)
+#define MBOX_STATE_OUT (1 << 5)
+#define MBOX_STATE_IN (1 << 6)
+#define MBOX_STATE_ACK (1 << 7)
+
+#define MBOX_DESTINATION_STATUS (1 << 6)
+
+struct hi3660_mbox_chan {
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ unsigned int *buf;
+
+ unsigned int irq_mode;
+
+ struct hi3660_mbox *parent;
+};
+
+struct hi3660_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi3660_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static inline void __ipc_lock(void __iomem *base, unsigned int lock_key)
+{
+ pr_debug("%s: base %p key %d\n", __func__, base, lock_key);
+
+ __raw_writel(lock_key, base + MBOX_IPC_LOCK);
+}
+
+static inline void __ipc_unlock(void __iomem *base, unsigned int key)
+{
+ pr_debug("%s: base %p key %d\n", __func__, base, key);
+
+ __raw_writel(key, base + MBOX_IPC_LOCK);
+}
+
+static inline unsigned int __ipc_lock_status(void __iomem *base)
+{
+ pr_debug("%s: base %p status %d\n",
+ __func__, base, __raw_readl(base + MBOX_IPC_LOCK));
+
+ return __raw_readl(base + MBOX_IPC_LOCK);
+}
+
+static inline void __ipc_set_src(void __iomem *base, int source, int mdev)
+{
+ pr_debug("%s: base %p src %x mdev %d\n",
+ __func__, base, source, mdev);
+
+ __raw_writel(BIT(source), base + MBOX_SRC_REG(mdev));
+}
+
+static inline unsigned int __ipc_read_src(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p src %x mdev %d\n",
+ __func__, base, __raw_readl(base + MBOX_SRC_REG(mdev)), mdev);
+
+ return __raw_readl(base + MBOX_SRC_REG(mdev));
+}
+
+static inline void __ipc_set_des(void __iomem *base, int source, int mdev)
+{
+ pr_debug("%s: base %p src %x mdev %d\n",
+ __func__, base, source, mdev);
+
+ __raw_writel(BIT(source), base + MBOX_DST_REG(mdev));
+}
+
+static inline void __ipc_clr_des(void __iomem *base, int source, int mdev)
+{
+ pr_debug("%s: base %p src %x mdev %d\n",
+ __func__, base, source, mdev);
+
+ __raw_writel(BIT(source), base + MBOX_DCLR_REG(mdev));
+}
+
+static inline unsigned int __ipc_des_status(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p src %x mdev %d\n",
+ __func__, base, __raw_readl(base + MBOX_DSTAT_REG(mdev)), mdev);
+
+ return __raw_readl(base + MBOX_DSTAT_REG(mdev));
+}
+
+static inline void __ipc_send(void __iomem *base, unsigned int tosend, int mdev)
+{
+ pr_debug("%s: base %p tosend %x mdev %d\n",
+ __func__, base, tosend, mdev);
+
+ __raw_writel(tosend, base + MBOX_SEND_REG(mdev));
+}
+
+static inline unsigned int __ipc_read(void __iomem *base, int mdev, int index)
+{
+ pr_debug("%s: base %p index %d data %x mdev %d\n",
+ __func__, base, index, __raw_readl(base + MBOX_DATA_REG(mdev, index)), mdev);
+
+ return __raw_readl(base + MBOX_DATA_REG(mdev, index));
+}
+
+static inline void __ipc_write(void __iomem *base, u32 data, int mdev, int index)
+{
+ pr_debug("%s: base %p index %d data %x mdev %d\n",
+ __func__, base, index, data, mdev);
+
+ __raw_writel(data, base + MBOX_DATA_REG(mdev, index));
+}
+
+static inline unsigned int __ipc_cpu_imask_get(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p imask %x mdev %d\n",
+ __func__, base, __raw_readl(base + MBOX_IMASK_REG(mdev)), mdev);
+
+ return __raw_readl(base + MBOX_IMASK_REG(mdev));
+}
+
+static inline void __ipc_cpu_imask_clr(void __iomem *base, unsigned int toclr, int mdev)
+{
+ unsigned int reg;
+
+ pr_debug("%s: base %p toclr %x mdev %d\n",
+ __func__, base, toclr, mdev);
+
+ reg = __raw_readl(base + MBOX_IMASK_REG(mdev));
+ reg = reg & (~(toclr));
+
+ __raw_writel(reg, base + MBOX_IMASK_REG(mdev));
+}
+
+static inline void __ipc_cpu_imask_all(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p mdev %d\n", __func__, base, mdev);
+
+ __raw_writel((~0), base + MBOX_IMASK_REG(mdev));
+}
+
+static inline void __ipc_cpu_iclr(void __iomem *base, unsigned int toclr, int mdev)
+{
+ pr_debug("%s: base %p toclr %x mdev %d\n",
+ __func__, base, toclr, mdev);
+
+ __raw_writel(toclr, base + MBOX_ICLR_REG(mdev));
+}
+
+static inline int __ipc_cpu_istatus(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p mdev %d\n", __func__, base, mdev);
+
+ return __raw_readl(base + MBOX_ICLR_REG(mdev));
+}
+
+static inline unsigned int __ipc_mbox_istatus(void __iomem *base, int cpu)
+{
+ pr_debug("%s: base %p cpu %d\n", __func__, base, cpu);
+
+ return __raw_readl(base + MBOX_CPU_IMASK(cpu));
+}
+
+static inline unsigned int __ipc_mbox_irstatus(void __iomem *base, int cpu)
+{
+ pr_debug("%s: base %p cpu %d\n", __func__, base, cpu);
+
+ return __raw_readl(base + MBOX_CPU_IRST(cpu));
+}
+
+static inline unsigned int __ipc_status(void __iomem *base, int mdev)
+{
+ pr_debug("%s: base %p mdev %d status %x\n",
+ __func__, base, mdev, __raw_readl(base + MBOX_MODE_REG(mdev)));
+
+ return __raw_readl(base + MBOX_MODE_REG(mdev));
+}
+
+static inline void __ipc_mode(void __iomem *base, unsigned int mode, int mdev)
+{
+ pr_debug("%s: base %p mdev %d mode %u\n",
+ __func__, base, mdev, mode);
+
+ __raw_writel(mode, base + MBOX_MODE_REG(mdev));
+}
+
+static int _mdev_check_state_machine(struct hi3660_mbox_chan *mchan,
+ unsigned int state)
+{
+ struct hi3660_mbox *mbox = mchan->parent;
+ int is_same = 0;
+
+ if ((state & __ipc_status(mbox->base, mchan->slot)))
+ is_same = 1;
+
+ pr_debug("%s: stm %u ch %d is_stm %d\n", __func__,
+ state, mchan->slot, is_same);
+
+ return is_same;
+}
+
+static void _mdev_release(struct hi3660_mbox_chan *mchan)
+{
+ struct hi3660_mbox *mbox = mchan->parent;
+
+ __ipc_cpu_imask_all(mbox->base, mchan->slot);
+ __ipc_set_src(mbox->base, mchan->ack_irq, mchan->slot);
+
+	asm volatile ("sev");
+}
+
+static void _mdev_ensure_channel(struct hi3660_mbox_chan *mchan)
+{
+ int timeout = 0, loop = 60 + 1000;
+
+ if (_mdev_check_state_machine(mchan, MBOX_STATE_IDLE))
+ /*IDLE STATUS, return directly */
+ return;
+
+ if (_mdev_check_state_machine(mchan, MBOX_STATE_ACK))
+ /*ACK STATUS, release the channel directly */
+ goto release;
+
+ /*
+ * The worst situation is to delay:
+ * 1000 * 5us + 60 * 5ms = 305ms
+ */
+ while (timeout < loop) {
+
+ if (timeout < 1000)
+ udelay(5);
+ else
+ usleep_range(3000, 5000);
+
+ /* If the ack status is ready, bail out */
+ if (_mdev_check_state_machine(mchan, MBOX_STATE_ACK))
+ break;
+
+ timeout++;
+ }
+
+ if (unlikely(timeout == loop)) {
+		pr_err("%s: ipc timeout\n", __func__);
+
+ /* TODO: add dump function */
+ }
+
+release:
+ /*release the channel */
+ _mdev_release(mchan);
+}
+
+static int _mdev_unlock(struct hi3660_mbox_chan *mchan)
+{
+ struct hi3660_mbox *mbox = mchan->parent;
+ int retry = 3;
+
+ do {
+ __ipc_unlock(mbox->base, 0x1ACCE551);
+ if (MBOX_IPC_UNLOCKED == __ipc_lock_status(mbox->base))
+ break;
+
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ if (!retry)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int _mdev_occupy(struct hi3660_mbox_chan *mchan)
+{
+ struct hi3660_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ int retry = 10;
+
+ do {
+ /*
+		 * hardware locking for exclusive access between CPUs
+		 * without an exclusive monitor mechanism.
+ */
+ if (!(__ipc_status(mbox->base, slot) & MBOX_STATE_IDLE))
+ __asm__ volatile ("wfe");
+ else {
+ /* Set the source processor bit */
+ __ipc_set_src(mbox->base, mchan->ack_irq, slot);
+ if (__ipc_read_src(mbox->base, slot) & BIT(mchan->ack_irq))
+ break;
+ }
+
+ retry--;
+ /* Hardware unlock */
+ } while (retry);
+
+ if (!retry)
+ return -ENODEV;
+
+ return 0;
+}
+
+static bool hi3660_mbox_last_tx_done(struct mbox_chan *chan)
+{
+	return true;
+}
+
+static int _mdev_hw_send(struct hi3660_mbox_chan *mchan, u32 *msg, u32 len)
+{
+ struct hi3660_mbox *mbox = mchan->parent;
+ int i, ack_mode;
+ unsigned int temp;
+
+ if (mchan->irq_mode)
+ ack_mode = MBOX_MANUAL_ACK;
+ else
+ ack_mode = MBOX_AUTO_ACK;
+
+ /* interrupts unmask */
+ __ipc_cpu_imask_all(mbox->base, mchan->slot);
+
+ if (MBOX_AUTO_ACK == ack_mode)
+ temp = BIT(mchan->dst_irq);
+ else
+ temp = BIT(mchan->ack_irq) | BIT(mchan->dst_irq);
+
+ __ipc_cpu_imask_clr(mbox->base, temp, mchan->slot);
+
+ /* des config */
+ __ipc_set_des(mbox->base, mchan->dst_irq, mchan->slot);
+
+ /* ipc mode config */
+ if (MBOX_AUTO_ACK == ack_mode)
+ temp = AUTOMATIC_ACK_CONFIG;
+ else
+ temp = NO_FUNC_CONFIG;
+
+ __ipc_mode(mbox->base, temp, mchan->slot);
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ __ipc_write(mbox->base, msg[i], mchan->slot, i);
+
+ mchan->buf = msg;
+
+ /* enable sending */
+ __ipc_send(mbox->base, BIT(mchan->ack_irq), mchan->slot);
+ return 0;
+}
+
+static int hi3660_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi3660_mbox_chan *mchan = chan->con_priv;
+ int err = 0;
+
+ /* indicate as a TX channel */
+ mchan->dir = MBOX_TX;
+
+ _mdev_ensure_channel(mchan);
+
+ if (_mdev_unlock(mchan)) {
+ pr_err("%s: can not be unlocked\n", __func__);
+ err = -EIO;
+ goto out;
+ }
+
+ if (_mdev_occupy(mchan)) {
+ pr_err("%s: can not be occupied\n", __func__);
+ err = -EBUSY;
+ goto out;
+ }
+
+ _mdev_hw_send(mchan, msg, MBOX_MSG_LEN);
+
+out:
+ return err;
+}
+
+static irqreturn_t hi3660_mbox_interrupt(int irq, void *p)
+{
+ struct hi3660_mbox *mbox = p;
+ struct hi3660_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ unsigned int status, imask, todo;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = __ipc_mbox_istatus(mbox->base, 0);
+
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+		mchan = chan->con_priv;
+
+		/* read the payload out of the slot's data registers */
+		for (i = 0; i < MBOX_MSG_LEN; i++)
+			msg[i] = __ipc_read(mbox->base, mchan->slot, i);
+
+		if (mchan->dir == MBOX_TX) {
+			/* hand the ack payload back through the tx buffer */
+			if (mchan->buf)
+				for (i = 0; i < MBOX_MSG_LEN; i++)
+					mchan->buf[i] = msg[i];
+			mbox_chan_txdone(chan, 0);
+		} else {
+			mbox_chan_received_data(chan, (void *)msg);
+		}
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ __ipc_write(mbox->base, 0x0, mchan->slot, i);
+
+ imask = __ipc_cpu_imask_get(mbox->base, mchan->slot);
+		todo = (BIT(0) | BIT(1)) & ~imask;
+		__ipc_cpu_iclr(mbox->base, todo, mchan->slot);
+
+ status = __ipc_status(mbox->base, mchan->slot);
+ if ((MBOX_DESTINATION_STATUS & status) &&
+ (!(AUTOMATIC_ACK_CONFIG & status)))
+ __ipc_send(mbox->base, todo, mchan->slot);
+
+ /*release the channel */
+ _mdev_release(mchan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hi3660_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi3660_mbox_chan *mchan;
+
+ mchan = chan->con_priv;
+
+ if (mchan->irq_mode)
+ chan->txdone_method = TXDONE_BY_IRQ;
+
+ return 0;
+}
+
+static void hi3660_mbox_shutdown(struct mbox_chan *chan)
+{
+ return;
+}
+
+static const struct mbox_chan_ops hi3660_mbox_ops = {
+ .send_data = hi3660_mbox_send_data,
+ .startup = hi3660_mbox_startup,
+ .shutdown = hi3660_mbox_shutdown,
+ .last_tx_done = hi3660_mbox_last_tx_done,
+};
+
+static struct mbox_chan *hi3660_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi3660_mbox *mbox = dev_get_drvdata(controller->dev);
+ struct hi3660_mbox_chan *mchan;
+ struct mbox_chan *chan;
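+	/* mbox specifier cells: channel index, destination irq bit, ack irq bit */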
+ unsigned int i = spec->args[0];
+ unsigned int dst_irq = spec->args[1];
+ unsigned int ack_irq = spec->args[2];
+
+ /* Bounds checking */
+ if (i >= mbox->chan_num) {
+ dev_err(mbox->dev, "Invalid channel idx %d\n", i);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = &mbox->chan[i];
+ if (mbox->irq_map_chan[i] == (void *)chan) {
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ mchan = chan->con_priv;
+ mchan->dst_irq = dst_irq;
+ mchan->ack_irq = ack_irq;
+
+ mbox->irq_map_chan[i] = (void *)chan;
+ return chan;
+}
+
+static const struct of_device_id hi3660_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi3660-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi3660_mbox_of_match);
+
+static int hi3660_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_mbox *mbox;
+ struct resource *res;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_MAX;
+ mbox->mchan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi3660_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return -ENODEV;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi3660_mbox_ops;
+ mbox->controller.of_xlate = hi3660_mbox_xlate;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+
+ mbox->mchan[i].parent = mbox;
+ mbox->mchan[i].slot = i;
+
+ if (i == 28)
+ /* channel 28 is used for thermal with irq mode */
+ mbox->mchan[i].irq_mode = 1;
+ else
+ /* other channels use automatic mode */
+ mbox->mchan[i].irq_mode = 0;
+ }
+
+ err = mbox_controller_register(&mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static int hi3660_mbox_remove(struct platform_device *pdev)
+{
+ struct hi3660_mbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ return 0;
+}
+
+static struct platform_driver hi3660_mbox_driver = {
+ .driver = {
+ .name = "hi3660-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = hi3660_mbox_of_match,
+ },
+ .probe = hi3660_mbox_probe,
+ .remove = hi3660_mbox_remove,
+};
+
+static int __init hi3660_mbox_init(void)
+{
+ return platform_driver_register(&hi3660_mbox_driver);
+}
+core_initcall(hi3660_mbox_init);
+
+static void __exit hi3660_mbox_exit(void)
+{
+ platform_driver_unregister(&hi3660_mbox_driver);
+}
+module_exit(hi3660_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi3660 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index 3fd703fe3aba..6fb7ba272e09 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -1,40 +1,35 @@
/*
- * Device driver for Hi6421 IC
+ * Device driver for Hi6421 PMIC
*
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
- * Copyright (c) <2013-2014> Linaro Ltd.
+ * Copyright (c) <2013-2017> Linaro Ltd.
* http://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/hi6421-pmic.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/mfd/hi6421-pmic.h>
static const struct mfd_cell hi6421_devs[] = {
{ .name = "hi6421-regulator", },
};
+static const struct mfd_cell hi6421v530_devs[] = {
+ { .name = "hi6421v530-regulator", },
+};
+
static const struct regmap_config hi6421_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -42,12 +37,33 @@ static const struct regmap_config hi6421_regmap_config = {
.max_register = HI6421_REG_TO_BUS_ADDR(HI6421_REG_MAX),
};
+static const struct of_device_id of_hi6421_pmic_match[] = {
+ {
+ .compatible = "hisilicon,hi6421-pmic",
+ .data = (void *)HI6421
+ },
+ {
+ .compatible = "hisilicon,hi6421v530-pmic",
+ .data = (void *)HI6421_V530
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_hi6421_pmic_match);
+
static int hi6421_pmic_probe(struct platform_device *pdev)
{
struct hi6421_pmic *pmic;
struct resource *res;
+ const struct of_device_id *id;
+ const struct mfd_cell *subdevs;
+ enum hi6421_type type;
void __iomem *base;
- int ret;
+ int n_subdevs, ret;
+
+ id = of_match_device(of_hi6421_pmic_match, &pdev->dev);
+ if (!id)
+ return -EINVAL;
+ type = (enum hi6421_type)id->data;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -61,41 +77,50 @@ static int hi6421_pmic_probe(struct platform_device *pdev)
pmic->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
&hi6421_regmap_config);
if (IS_ERR(pmic->regmap)) {
- dev_err(&pdev->dev,
- "regmap init failed: %ld\n", PTR_ERR(pmic->regmap));
+ dev_err(&pdev->dev, "Failed to initialise Regmap: %ld\n",
+ PTR_ERR(pmic->regmap));
return PTR_ERR(pmic->regmap);
}
- /* set over-current protection debounce 8ms */
- regmap_update_bits(pmic->regmap, HI6421_OCP_DEB_CTRL_REG,
+ platform_set_drvdata(pdev, pmic);
+
+ switch (type) {
+ case HI6421:
+ /* set over-current protection debounce 8ms */
+ regmap_update_bits(pmic->regmap, HI6421_OCP_DEB_CTRL_REG,
(HI6421_OCP_DEB_SEL_MASK
| HI6421_OCP_EN_DEBOUNCE_MASK
| HI6421_OCP_AUTO_STOP_MASK),
(HI6421_OCP_DEB_SEL_8MS
| HI6421_OCP_EN_DEBOUNCE_ENABLE));
- platform_set_drvdata(pdev, pmic);
+ subdevs = hi6421_devs;
+ n_subdevs = ARRAY_SIZE(hi6421_devs);
+ break;
+ case HI6421_V530:
+ subdevs = hi6421v530_devs;
+ n_subdevs = ARRAY_SIZE(hi6421v530_devs);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown device type %d\n",
+ (unsigned int)type);
+ return -EINVAL;
+ }
- ret = devm_mfd_add_devices(&pdev->dev, 0, hi6421_devs,
- ARRAY_SIZE(hi6421_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ subdevs, n_subdevs, NULL, 0, NULL);
if (ret) {
- dev_err(&pdev->dev, "add mfd devices failed: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to add child devices: %d\n", ret);
return ret;
}
return 0;
}
-static const struct of_device_id of_hi6421_pmic_match_tbl[] = {
- { .compatible = "hisilicon,hi6421-pmic", },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_hi6421_pmic_match_tbl);
-
static struct platform_driver hi6421_pmic_driver = {
.driver = {
- .name = "hi6421_pmic",
- .of_match_table = of_hi6421_pmic_match_tbl,
+ .name = "hi6421_pmic",
+ .of_match_table = of_hi6421_pmic_match,
},
.probe = hi6421_pmic_probe,
};
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9360e6ebb4ea..5a126f6973e3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -799,4 +799,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/microchip/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4a25950ce85f..21d43ab8db76 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
+obj-y += microchip/
obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
diff --git a/drivers/misc/microchip/Kconfig b/drivers/misc/microchip/Kconfig
new file mode 100644
index 000000000000..9e4e4cd505a6
--- /dev/null
+++ b/drivers/misc/microchip/Kconfig
@@ -0,0 +1,7 @@
+#
+# Microchip devices
+#
+
+menu "Microchip devices"
+source "drivers/misc/microchip/hub/Kconfig"
+endmenu
diff --git a/drivers/misc/microchip/Makefile b/drivers/misc/microchip/Makefile
new file mode 100644
index 000000000000..cc71719d7ecf
--- /dev/null
+++ b/drivers/misc/microchip/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for microchip devices that really don't fit anywhere else.
+#
+obj-y += hub/
\ No newline at end of file
diff --git a/drivers/misc/microchip/hub/Kconfig b/drivers/misc/microchip/hub/Kconfig
new file mode 100644
index 000000000000..258be0073082
--- /dev/null
+++ b/drivers/misc/microchip/hub/Kconfig
@@ -0,0 +1,6 @@
+config HUB_USB5734
+ tristate "HUB_USB5734"
+ depends on GPIOLIB
+ default n
+ help
+ If you say yes here you get support for HUB_USB5734.
diff --git a/drivers/misc/microchip/hub/Makefile b/drivers/misc/microchip/hub/Makefile
new file mode 100644
index 000000000000..3ff884cd5da4
--- /dev/null
+++ b/drivers/misc/microchip/hub/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_HUB_USB5734) += hub_usb5734.o
diff --git a/drivers/misc/microchip/hub/hub_usb5734.c b/drivers/misc/microchip/hub/hub_usb5734.c
new file mode 100644
index 000000000000..e71c9934b0b8
--- /dev/null
+++ b/drivers/misc/microchip/hub/hub_usb5734.c
@@ -0,0 +1,300 @@
+/*
+ * hub_usb5734.c
+ *
+ * Copyright (c) Hisilicon Tech. Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/param.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/hisi/log/hisi_log.h>
+#include <linux/hisi/usb/hisi_usb.h>
+#include <linux/tifm.h>
+#include <linux/dma-mapping.h>
+#include <linux/hisi/usb/hub/hisi_hub.h>
+#define DEVICE_DRIVER_NAME "gpio_hub_for_usb5734"
+
+#define GPIO_HUB_OTG_HOST 1
+#define GPIO_HUB_OTG_DEVICE 0
+#define GPIO_TYPEC_VBUS_POWER 1
+#define GPIO_TYPEC_NO_POWER 0
+#define GPIO_HUB_VBUS_POWER 1
+#define GPIO_HUB_VBUS_NO_POWER 0
+#define GPIO_HUB_HUB_VBUS_POWER 1
+
+/* SOC_CRGPERIPH_PEREN1_UNION */
+#define SOC_CRGPERIPH_PEREN1_ADDR(base) ((base) + (0x010))
+
+#define HISILOG_TAG GPIO_HUB
+HISILOG_REGIST();
+
+struct gpio_hub_info {
+ struct platform_device *pdev;
+ int otg_switch_gpio;
+ int typec_vbus_gpio;
+ int typec_vbus_enable_val;
+ int hub_vbus_gpio;
+};
+
+static struct gpio_hub_info gpio_hub_driver_info = {
+ .otg_switch_gpio = -1,
+ .typec_vbus_gpio = -1,
+ .typec_vbus_enable_val = -1,
+ .hub_vbus_gpio = -1,
+};
+
+void gpio_hub_power_off(void)
+{
+ if (gpio_is_valid(gpio_hub_driver_info.hub_vbus_gpio)) {
+ gpio_set_value(gpio_hub_driver_info.hub_vbus_gpio,
+ GPIO_HUB_VBUS_NO_POWER);
+ hisilog_info("%s: gpio hub hub vbus no power set success",
+ __func__);
+ } else {
+ hisilog_err("%s: gpio hub hub vbus no power set err",
+ __func__);
+ }
+}
+
+void gpio_hub_power_on(void)
+{
+ if (gpio_is_valid(gpio_hub_driver_info.hub_vbus_gpio))
+ gpio_set_value(gpio_hub_driver_info.hub_vbus_gpio,
+ GPIO_HUB_VBUS_POWER);
+ else
+ hisilog_err("%s: gpio hub hub vbus set err", __func__);
+}
+
+void gpio_hub_switch_to_hub(void)
+{
+ int gpio = gpio_hub_driver_info.otg_switch_gpio;
+
+ if (!gpio_is_valid(gpio)) {
+ hisilog_err("%s: otg_switch_gpio is err\n", __func__);
+ return;
+ }
+
+ if (gpio_get_value(gpio)) {
+ hisilog_info("%s: already switch to hub\n", __func__);
+ return;
+ }
+
+ gpio_direction_output(gpio, 1);
+	hisilog_info("%s: switch to hub\n", __func__);
+}
+EXPORT_SYMBOL_GPL(gpio_hub_switch_to_hub);
+
+void gpio_hub_switch_to_typec(void)
+{
+ int gpio = gpio_hub_driver_info.otg_switch_gpio;
+
+ if (!gpio_is_valid(gpio)) {
+ hisilog_err("%s: otg_switch_gpio is err\n", __func__);
+ return;
+ }
+
+ if (!gpio_get_value(gpio)) {
+ hisilog_info("%s: already switch to typec\n", __func__);
+ return;
+ }
+
+ gpio_direction_output(gpio, 0);
+	hisilog_info("%s: switch to typec\n", __func__);
+}
+EXPORT_SYMBOL_GPL(gpio_hub_switch_to_typec);
+
+static void gpio_hub_change_typec_power(int gpio, int on)
+{
+ if (!gpio_is_valid(gpio)) {
+ hisilog_err("%s: typec power gpio is err\n", __func__);
+ return;
+ }
+
+ if (gpio_get_value(gpio) == on) {
+ hisilog_info("%s: typec power no change\n", __func__);
+ return;
+ }
+
+ gpio_direction_output(gpio, on);
+ hisilog_info("%s: set typec vbus gpio to %d\n", __func__, on);
+}
+
+void gpio_hub_typec_power_on(void)
+{
+ struct gpio_hub_info *info = &gpio_hub_driver_info;
+
+ gpio_hub_change_typec_power(info->typec_vbus_gpio,
+ info->typec_vbus_enable_val);
+}
+EXPORT_SYMBOL_GPL(gpio_hub_typec_power_on);
+
+void gpio_hub_typec_power_off(void)
+{
+ struct gpio_hub_info *info = &gpio_hub_driver_info;
+
+ gpio_hub_change_typec_power(info->typec_vbus_gpio,
+ !info->typec_vbus_enable_val);
+}
+EXPORT_SYMBOL_GPL(gpio_hub_typec_power_off);
+
+static int gpio_hub_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *root = pdev->dev.of_node;
+ struct gpio_hub_info *info = &gpio_hub_driver_info;
+
+ hisilog_info("%s: step in\n", __func__);
+
+ info->pdev = pdev;
+ if (!pdev)
+ return -EBUSY;
+
+ info->hub_vbus_gpio = of_get_named_gpio(root, "hub_vdd33_en_gpio", 0);
+ if (!gpio_is_valid(info->hub_vbus_gpio)) {
+ hisilog_err("%s: hub_vbus_gpio is err\n", __func__);
+ return info->hub_vbus_gpio;
+ }
+ ret = gpio_request(info->hub_vbus_gpio, "hub_vbus_int_gpio");
+ if (ret) {
+ hisilog_err("%s: request hub_vbus_gpio err\n", __func__);
+ return ret;
+ }
+
+ info->typec_vbus_gpio = of_get_named_gpio(root,
+ "typc_vbus_int_gpio,typec-gpios", 0);
+	if (!gpio_is_valid(info->typec_vbus_gpio)) {
+ hisilog_err("%s: typec_vbus_gpio is err\n", __func__);
+ ret = info->typec_vbus_gpio;
+ goto free_gpio1;
+ }
+ ret = gpio_request(info->typec_vbus_gpio, "typc_vbus_int_gpio");
+ if (ret) {
+ hisilog_err("%s: request typec_vbus_gpio err\n", __func__);
+ goto free_gpio1;
+ }
+
+ ret = of_property_read_u32(root, "typc_vbus_enable_val",
+ &info->typec_vbus_enable_val);
+ if (ret) {
+ hisilog_err("%s: typc_vbus_enable_val can't get\n", __func__);
+ goto free_gpio2;
+ }
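+	/* normalise the DT value to a plain 0/1 gpio level */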
+ info->typec_vbus_enable_val = !!info->typec_vbus_enable_val;
+
+ /* only for v2 */
+ info->otg_switch_gpio = of_get_named_gpio(root, "otg_gpio", 0);
+ if (!gpio_is_valid(info->otg_switch_gpio)) {
+ hisilog_info("%s: otg_switch_gpio is err\n", __func__);
+ info->otg_switch_gpio = -1;
+ }
+
+ ret = gpio_direction_output(info->hub_vbus_gpio, GPIO_HUB_VBUS_POWER);
+ if (ret) {
+ hisilog_err("%s: power on hub vbus err\n", __func__);
+ goto free_gpio2;
+ }
+
+ ret = gpio_direction_output(info->typec_vbus_gpio,
+ info->typec_vbus_enable_val);
+ if (ret) {
+ hisilog_err("%s: power on typec vbus err", __func__);
+ goto free_gpio2;
+ }
+
+ return 0;
+
+free_gpio2:
+ gpio_free(info->typec_vbus_gpio);
+ info->typec_vbus_gpio = -1;
+free_gpio1:
+ gpio_free(info->hub_vbus_gpio);
+ info->hub_vbus_gpio = -1;
+
+ return ret;
+}
+
+static int gpio_hub_remove(struct platform_device *pdev)
+{
+ struct gpio_hub_info *info = &gpio_hub_driver_info;
+
+ if (gpio_is_valid(info->otg_switch_gpio)) {
+ gpio_free(info->otg_switch_gpio);
+ info->otg_switch_gpio = -1;
+ }
+
+ if (gpio_is_valid(info->typec_vbus_gpio)) {
+ gpio_free(info->typec_vbus_gpio);
+ info->typec_vbus_gpio = -1;
+ }
+
+ if (gpio_is_valid(info->hub_vbus_gpio)) {
+ gpio_free(info->hub_vbus_gpio);
+ info->hub_vbus_gpio = -1;
+ }
+ return 0;
+}
+
+static const struct of_device_id id_table_for_gpio_hub[] = {
+ {.compatible = "hisilicon,gpio_hubv1"},
+ {.compatible = "hisilicon,gpio_hubv2"},
+ {}
+};
+
+static struct platform_driver gpio_hub_driver = {
+ .probe = gpio_hub_probe,
+ .remove = gpio_hub_remove,
+ .driver = {
+ .name = DEVICE_DRIVER_NAME,
+ .of_match_table = of_match_ptr(id_table_for_gpio_hub),
+
+ },
+};
+
+static int __init gpio_hub_init(void)
+{
+ int ret = platform_driver_register(&gpio_hub_driver);
+
+ hisilog_info("%s:gpio hub init status:%d\n", __func__, ret);
+ return ret;
+}
+
+static void __exit gpio_hub_exit(void)
+{
+ platform_driver_unregister(&gpio_hub_driver);
+}
+
+module_init(gpio_hub_init);
+module_exit(gpio_hub_exit);
+
+MODULE_AUTHOR("wangbinghui<wangbinghui@hisilicon.com>");
+MODULE_DESCRIPTION("HUB GPIO FOR OTG ID driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index f34dcc514730..f2df2c7352e2 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -14,4 +14,12 @@ config TI_ST
are returned to relevant protocol drivers based on their
packet types.
+config ST_HCI
+ tristate "HCI TTY emulation driver for Bluetooth"
+ depends on TI_ST
+ help
+ This enables the TTY device like emulation for HCI used by
+ user-space Bluetooth stacks.
+ It will provide a character device for user space Bluetooth stack to
+ send/receive data over shared transport.
endmenu
diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile
index 78d7ebb14749..454621909fb7 100644
--- a/drivers/misc/ti-st/Makefile
+++ b/drivers/misc/ti-st/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_TI_ST) += st_drv.o
st_drv-objs := st_core.o st_kim.o st_ll.o
+obj-$(CONFIG_ST_HCI) += tty_hci.o
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index bf0d7708beac..a67d59b1d709 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -36,6 +36,8 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -43,6 +45,9 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
+struct ti_st_plat_data *dt_pdata;
+static struct ti_st_plat_data *get_platform_data(struct device *dev);
+
/**
* st_get_plat_device -
* function which returns the reference to the platform device
@@ -463,7 +468,12 @@ long st_kim_start(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
- pdata = kim_gdata->kim_pdev->dev.platform_data;
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+ }
do {
/* platform specific enabling code here */
@@ -523,12 +533,18 @@ long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
- struct ti_st_plat_data *pdata =
- kim_gdata->kim_pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct tty_struct *tty = kim_gdata->core_data->tty;
reinit_completion(&kim_gdata->ldisc_installed);
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+ }
+
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
@@ -720,13 +736,52 @@ static const struct file_operations list_debugfs_fops = {
* board-*.c file
*/
+static const struct of_device_id kim_of_match[] = {
+{
+ .compatible = "kim",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kim_of_match);
+
+static struct ti_st_plat_data *get_platform_data(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ const u32 *dt_property;
+ int len;
+
+ dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
+ if (!dt_pdata)
+ return NULL;
+
+ dt_property = of_get_property(np, "dev_name", &len);
+ if (dt_property)
+ memcpy(&dt_pdata->dev_name, dt_property, len);
+ of_property_read_u32(np, "nshutdown_gpio",
+ &dt_pdata->nshutdown_gpio);
+ of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
+ of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
+
+ return dt_pdata;
+}
+
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
struct kim_data_s *kim_gdata;
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
int err;
+ if (pdev->dev.of_node)
+ pdata = get_platform_data(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
st_kim_devices[pdev->id] = pdev;
@@ -807,9 +862,16 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct kim_data_s *kim_gdata;
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
+
kim_gdata = platform_get_drvdata(pdev);
/* Free the Bluetooth/FM/GPIO
@@ -827,12 +889,22 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
+ kfree(dt_pdata);
+ dt_pdata = NULL;
+
return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->suspend)
return pdata->suspend(pdev, state);
@@ -842,7 +914,14 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state)
static int kim_resume(struct platform_device *pdev)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->resume)
return pdata->resume(pdev);
@@ -859,6 +938,7 @@ static struct platform_driver kim_platform_driver = {
.resume = kim_resume,
.driver = {
.name = "kim",
+ .of_match_table = of_match_ptr(kim_of_match),
},
};
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 93b4d67cc4a3..518e1b7f2f95 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -26,6 +26,7 @@
#include <linux/ti_wilink_st.h>
/**********************************************************************/
+
/* internal functions */
static void send_ll_cmd(struct st_data_s *st_data,
unsigned char cmd)
@@ -53,7 +54,13 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
/* communicate to platform about chip asleep */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_asleep)
pdata->chip_asleep(NULL);
}
@@ -86,7 +93,13 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
diff --git a/drivers/misc/ti-st/tty_hci.c b/drivers/misc/ti-st/tty_hci.c
new file mode 100644
index 000000000000..1f0986ae36d5
--- /dev/null
+++ b/drivers/misc/ti-st/tty_hci.c
@@ -0,0 +1,543 @@
+/*
+ * TTY emulation for user-space Bluetooth stacks over HCI-H4
+ * Copyright (C) 2011-2012 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* define one of the following for debugging:
+#define DEBUG
+#define VERBOSE
+*/
+
+#define pr_fmt(fmt) "(hci_tty): " fmt
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+
+#include <linux/ti_wilink_st.h>
+
+/* Number of seconds to wait for registration completion
+ * when ST returns PENDING status.
+ */
+#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
+
+/**
+ * struct ti_st - driver operation structure
+ * @hdev: hci device pointer which binds to bt driver
+ * @reg_status: ST registration callback status
+ * @st_write: write function provided by the ST driver,
+ * used by the driver in hci_tty_write().
+ * @wait_reg_completion: completion sync between hci_tty_open()
+ * and st_reg_completion_cb().
+ * @data_q: wait queue woken by st_receive() when data arrives
+ * @rx_list: queue of received skbs waiting to be read
+ */
+struct ti_st {
+ struct hci_dev *hdev;
+ char reg_status;
+ long (*st_write)(struct sk_buff *);
+ struct completion wait_reg_completion;
+ wait_queue_head_t data_q;
+ struct sk_buff_head rx_list;
+};
+
+#define DEVICE_NAME "hci_tty"
+
+/***********Functions called from ST driver**********************************/
+/* Called by the Shared Transport layer when received data is
+ * available.
+ */
+static long st_receive(void *priv_data, struct sk_buff *skb)
+{
+ struct ti_st *hst = (void *)priv_data;
+
+ pr_debug("@ %s", __func__);
+#ifdef VERBOSE
+ print_hex_dump(KERN_INFO, ">rx>", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, skb->len, 0);
+#endif
+ skb_queue_tail(&hst->rx_list, skb);
+ wake_up_interruptible(&hst->data_q);
+ return 0;
+}
+
+/* Called by the ST layer to indicate protocol registration completion
+ * status. hci_tty_open() waits for a signal from this callback when
+ * st_register() returns -EINPROGRESS.
+ */
+static void st_reg_completion_cb(void *priv_data, int data)
+{
+ struct ti_st *lhst = (void *)priv_data;
+
+ pr_info("@ %s\n", __func__);
+ /* Save registration status for use in hci_tty_open() */
+ lhst->reg_status = data;
+ /* complete the wait in hci_tty_open() */
+ complete(&lhst->wait_reg_completion);
+}
+
+/* protocol structure registered with shared transport */
+#define MAX_BT_CHNL_IDS 3
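+/* The chnl_id values below are the HCI H4 packet type bytes (0x02 ACL,
+ * 0x03 SCO, 0x04 event) that TI-ST uses to demultiplex received frames.
+ */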
+static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+ {
+ .chnl_id = 0x04, /* HCI Events */
+ .hdr_len = 2,
+ .offset_len_in_hdr = 1,
+ .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+ .reserve = 8,
+ },
+ {
+ .chnl_id = 0x02, /* ACL */
+ .hdr_len = 4,
+ .offset_len_in_hdr = 2,
+ .len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
+ .reserve = 8,
+ },
+ {
+ .chnl_id = 0x03, /* SCO */
+ .hdr_len = 3,
+ .offset_len_in_hdr = 2,
+ .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
+ .reserve = 8,
+ },
+};
+/** hci_tty_open Function
+ * This function registers the HCI channels with the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_open(struct inode *inod, struct file *file)
+{
+ int i = 0, err = 0;
+ unsigned long timeleft;
+ struct ti_st *hst;
+
+ pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+ hst = kzalloc(sizeof(*hst), GFP_KERNEL);
+ if (!hst)
+ return -ENOMEM;
+ file->private_data = hst;
+
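+ /* Register each of the three HCI channels (event/ACL/SCO)
+ * with the shared transport.
+ */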
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ ti_st_proto[i].priv_data = hst;
+ ti_st_proto[i].max_frame_size = 1026;
+ ti_st_proto[i].recv = st_receive;
+ ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
+
+ /* Prepare wait-for-completion handler */
+ init_completion(&hst->wait_reg_completion);
+ /* Reset ST registration callback status flag,
+ * this value will be updated in
+ * st_reg_completion_cb()
+ * function whenever it called from ST driver.
+ */
+ hst->reg_status = -EINPROGRESS;
+
+ err = st_register(&ti_st_proto[i]);
+ if (!err)
+ goto done;
+
+ if (err != -EINPROGRESS) {
+ pr_err("st_register failed %d", err);
+ goto error;
+ }
+
+ /* ST is busy with either protocol
+ * registration or firmware download.
+ */
+ pr_debug("waiting for registration completion signal from ST");
+ timeleft = wait_for_completion_timeout
+ (&hst->wait_reg_completion,
+ msecs_to_jiffies(BT_REGISTER_TIMEOUT));
+ if (!timeleft) {
+ pr_err("Timeout(%d sec),didn't get reg completion signal from ST",
+ BT_REGISTER_TIMEOUT / 1000);
+ err = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Is ST registration callback
+ * called with ERROR status? */
+ if (hst->reg_status != 0) {
+ pr_err("ST registration completed with invalid status %d",
+ hst->reg_status);
+ err = -EAGAIN;
+ goto error;
+ }
+
+done:
+ hst->st_write = ti_st_proto[i].write;
+ if (!hst->st_write) {
+ pr_err("undefined ST write function");
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ /* Undo registration with ST */
+ err = st_unregister(&ti_st_proto[i]);
+ if (err)
+ pr_err("st_unregister() failed with error %d",
+ err);
+ hst->st_write = NULL;
+ }
+ return -EIO;
+ }
+ }
+
+ skb_queue_head_init(&hst->rx_list);
+ init_waitqueue_head(&hst->data_q);
+
+ return 0;
+
+error:
+ kfree(hst);
+ return err;
+}
+
+/** hci_tty_release Function
+ * This function unregisters the HCI channels from the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_release(struct inode *inod, struct file *file)
+{
+ int err, i;
+ struct ti_st *hst = file->private_data;
+
+ pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ err = st_unregister(&ti_st_proto[i]);
+ if (err)
+ pr_err("st_unregister(%d) failed with error %d",
+ ti_st_proto[i].chnl_id, err);
+ }
+
+ hst->st_write = NULL;
+ skb_queue_purge(&hst->rx_list);
+ kfree(hst);
+ return err;
+}
+
+/** hci_tty_read Function
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : Data which needs to be passed to APP
+ * @size : Length of the data passed
+ * @offset : file offset (unused)
+ * Returns Size of packet received - on success
+ * else suitable error code
+ */
+ssize_t hci_tty_read(struct file *file, char __user *data, size_t size,
+ loff_t *offset)
+{
+ int len = 0, tout;
+ struct sk_buff *skb = NULL, *rskb = NULL;
+ struct ti_st *hst;
+
+ pr_debug("inside %s (%p, %p, %zu, %p)\n",
+ __func__, file, data, size, offset);
+
+ /* Validate input parameters */
+ if (!file || !data || !size) {
+ pr_err("Invalid input params passed to %s", __func__);
+ return -EINVAL;
+ }
+
+ hst = file->private_data;
+
+ /* cannot come here if poll-ed before reading
+ * if not poll-ed wait on the same wait_q
+ */
+ tout = wait_event_interruptible_timeout(hst->data_q,
+ !skb_queue_empty(&hst->rx_list),
+ msecs_to_jiffies(1000));
+ /* Check for timed out condition */
+ if (0 == tout) {
+ pr_err("Device Read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* hst->rx_list not empty skb already present */
+ skb = skb_dequeue(&hst->rx_list);
+ if (!skb) {
+ pr_err("dequed skb is null?\n");
+ return -EIO;
+ }
+
+#ifdef VERBOSE
+ print_hex_dump(KERN_INFO, ">in>", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, skb->len, 0);
+#endif
+
+ /* Forward the data to the user */
+ if (skb->len >= size) {
+ pr_err("FIONREAD not done before read\n");
+ return -ENOMEM;
+ } else {
+ /* returning skb */
+ rskb = alloc_skb(size, GFP_KERNEL);
+ if (!rskb) {
+ pr_err("alloc_skb error\n");
+ return -ENOMEM;
+ }
+
+ /* cb[0] has the pkt_type 0x04 or 0x02 or 0x03 */
+ memcpy(skb_put(rskb, 1), &skb->cb[0], 1);
+ memcpy(skb_put(rskb, skb->len), skb->data, skb->len);
+
+ if (copy_to_user(data, rskb->data, rskb->len)) {
+ pr_err("unable to copy to user space\n");
+ /* Queue the skb back to head */
+ skb_queue_head(&hst->rx_list, skb);
+ kfree_skb(rskb);
+ return -EIO;
+ }
+ }
+
+ len = rskb->len; /* len of returning skb */
+ kfree_skb(skb);
+ kfree_skb(rskb);
+ pr_debug("total size read= %d\n", len);
+ return len;
+}
+
+/* hci_tty_write Function
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : packet data from BT application
+ * @size : Size of the packet data
+ * @offset :
+ * Returns Size of packet on success
+ * else suitable error code
+ */
+ssize_t hci_tty_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct ti_st *hst = file->private_data;
+ struct sk_buff *skb;
+
+ pr_debug("inside %s (%p, %p, %zu, %p)\n",
+ __func__, file, data, size, offset);
+
+ if (!hst->st_write) {
+ pr_err(" Can't write to ST, hhci_tty->st_write null ?");
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ /* Validate Created SKB */
+ if (!skb) {
+ pr_err("Error allocating SKB\n");
+ return -ENOMEM;
+ }
+
+ /* Forward the data from the user space to ST core */
+ if (copy_from_user(skb_put(skb, size), data, size)) {
+ pr_err(" Unable to copy from user space");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+#ifdef VERBOSE
+ pr_debug("start data..");
+ print_hex_dump(KERN_INFO, "<out<", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, size, 0);
+ pr_debug("\n..end data");
+#endif
+
+ hst->st_write(skb);
+ return size;
+}
+
+/** hci_tty_ioctl Function
+ * This performs the function directed by the IOCTL command and its
+ * argument.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @cmd : IOCTL Command
+ * @arg : Command argument for IOCTL command
+ * Returns 0 on success
+ * else suitable error code
+ */
+static long hci_tty_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct sk_buff *skb = NULL;
+ int retcode = 0;
+ struct ti_st *hst;
+ unsigned int value = 0;
+
+ pr_debug("inside %s (%p, %u, %lx)", __func__, file, cmd, arg);
+
+ /* Validate input parameters */
+ if (!file || !cmd) {
+ pr_err("invalid input parameters passed to %s", __func__);
+ return -EINVAL;
+ }
+
+ hst = file->private_data;
+
+ switch (cmd) {
+ case FIONREAD:
+ /* Dequeue the SKB from the head if rx_list is not empty and
+ * update the argument with skb->len to report the amount of data
+ * available in that SKB, +1 for the PKT_TYPE field which TI-ST
+ * does not include in the data.
+ */
+ skb = skb_dequeue(&hst->rx_list);
+ if (skb != NULL) {
+ value = skb->len + 1;
+ /* Re-store the SKB for future read operations */
+ skb_queue_head(&hst->rx_list, skb);
+ }
+ pr_debug("returning %d\n", value);
+ if (copy_to_user((void __user *)arg, &value, sizeof(value)))
+ return -EFAULT;
+ break;
+ default:
+ pr_debug("Un-Identified IOCTL %d", cmd);
+ retcode = 0;
+ break;
+ }
+
+ return retcode;
+}
+
+/** hci_tty_poll Function
+ * This function waits until some data is received by the hci_tty driver from ST.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @wait : POLL wait information
+ * Returns status of POLL on success
+ * else suitable error code
+ */
+static unsigned int hci_tty_poll(struct file *file, poll_table *wait)
+{
+ struct ti_st *hst = file->private_data;
+ unsigned long mask = 0;
+
+ pr_debug("@ %s\n", __func__);
+
+ /* wait to be completed by st_receive */
+ poll_wait(file, &hst->data_q, wait);
+ pr_debug("poll broke\n");
+
+ if (!skb_queue_empty(&hst->rx_list)) {
+ pr_debug("rx list que !empty\n");
+ mask |= POLLIN; /* TODO: check app for mask */
+ }
+
+ return mask;
+}
+
+/* BT Char driver function pointers
+ * These functions are called from user space by performing file operations
+ * on the /dev/hci_tty node exposed by this driver during init.
+ */
+const struct file_operations hci_tty_chrdev_ops = {
+ .owner = THIS_MODULE,
+ .open = hci_tty_open,
+ .read = hci_tty_read,
+ .write = hci_tty_write,
+ .unlocked_ioctl = hci_tty_ioctl,
+ .poll = hci_tty_poll,
+ .release = hci_tty_release,
+};
+
+/*********Functions called during insmod and delmod****************************/
+
+static int hci_tty_major; /* major number */
+static struct class *hci_tty_class; /* class during class_create */
+static struct device *hci_tty_dev; /* dev during device_create */
+/** hci_tty_init Function
+ * This function initializes the hci_tty driver parameters and exposes
+ * /dev/hci_tty node to user space
+ *
+ * Parameters : NULL
+ * Returns 0 on success
+ * else suitable error code
+ */
+static int __init hci_tty_init(void)
+{
+ pr_info("inside %s\n", __func__);
+
+ /* Expose the device DEVICE_NAME to user space
+ * And obtain the major number for the device
+ */
+ hci_tty_major = register_chrdev(0, DEVICE_NAME, &hci_tty_chrdev_ops);
+
+ if (0 > hci_tty_major) {
+ pr_err("Error when registering to char dev");
+ return hci_tty_major;
+ }
+
+ /* udev */
+ hci_tty_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(hci_tty_class)) {
+ pr_err("Something went wrong in class_create");
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+ return -1;
+ }
+
+ hci_tty_dev =
+ device_create(hci_tty_class, NULL, MKDEV(hci_tty_major, 0),
+ NULL, DEVICE_NAME);
+ if (IS_ERR(hci_tty_dev)) {
+ pr_err("Error in device create");
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+ class_destroy(hci_tty_class);
+ return -1;
+ }
+ pr_info("allocated %d, %d\n", hci_tty_major, 0);
+ return 0;
+}
+
+/** hci_tty_exit Function
+ * This function destroys the hci_tty driver parameters and the /dev/hci_tty node
+ *
+ * Parameters : NULL
+ * Returns NULL
+ */
+static void __exit hci_tty_exit(void)
+{
+ pr_info("inside %s\n", __func__);
+ pr_info("bye.. freeing up %d\n", hci_tty_major);
+
+ device_destroy(hci_tty_class, MKDEV(hci_tty_major, 0));
+ class_destroy(hci_tty_class);
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+}
+
+module_init(hci_tty_init);
+module_exit(hci_tty_exit);
+
+MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 624789496dce..74bee944d7d6 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
@@ -28,7 +30,38 @@
#define AO_SCTRL_SEL18 BIT(10)
#define AO_SCTRL_CTRL3 0x40C
+#define DWMMC_SDIO_ID 2
+
+#define SOC_SCTRL_SCPERCTRL5 (0x314)
+#define SDCARD_IO_SEL18 BIT(2)
+
+#define SDCARD_RD_THRESHOLD (512)
+
+#define GENCLK_DIV (7)
+
+#define GPIO_CLK_ENABLE BIT(16)
+#define GPIO_CLK_DIV_MASK GENMASK(11, 8)
+#define GPIO_USE_SAMPLE_DLY_MASK GENMASK(13, 13)
+#define UHS_REG_EXT_SAMPLE_PHASE_MASK GENMASK(20, 16)
+#define UHS_REG_EXT_SAMPLE_DRVPHASE_MASK GENMASK(25, 21)
+#define UHS_REG_EXT_SAMPLE_DLY_MASK GENMASK(30, 26)
+
+#define TIMING_MODE 3
+#define TIMING_CFG_NUM 10
+
+#define PULL_DOWN BIT(1)
+#define PULL_UP BIT(0)
+
+#define NUM_PHASES (40)
+
+#define ENABLE_SHIFT_MIN_SMPL (4)
+#define ENABLE_SHIFT_MAX_SMPL (12)
+#define USE_DLY_MIN_SMPL (11)
+#define USE_DLY_MAX_SMPL (14)
+
struct k3_priv {
+ int ctrl_id;
+ u32 cur_speed;
struct regmap *reg;
};
@@ -38,6 +71,41 @@ static unsigned long dw_mci_hi6220_caps[] = {
0
};
+struct hs_timing {
+ u32 drv_phase;
+ u32 smpl_dly;
+ u32 smpl_phase_max;
+ u32 smpl_phase_min;
+};
+
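+/*
+ * hs_timing_cfg rows are selected by the controller id taken from the
+ * "mshc" DT alias (priv->ctrl_id); columns are the MMC timing modes.
+ */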
+struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+ { /* reserved */ },
+ { /* SD */
+ {7, 0, 15, 15,}, /* 0: LEGACY 400k */
+ {6, 0, 4, 4,}, /* 1: MMC_HS */
+ {6, 0, 3, 3,}, /* 2: SD_HS */
+ {6, 0, 15, 15,}, /* 3: SDR12 */
+ {6, 0, 2, 2,}, /* 4: SDR25 */
+ {4, 0, 11, 0,}, /* 5: SDR50 */
+ {6, 4, 15, 0,}, /* 6: SDR104 */
+ {0}, /* 7: DDR50 */
+ {0}, /* 8: DDR52 */
+ {0}, /* 9: HS200 */
+ },
+ { /* SDIO */
+ {7, 0, 15, 15,}, /* 0: LEGACY 400k */
+ {0}, /* 1: MMC_HS */
+ {6, 0, 15, 15,}, /* 2: SD_HS */
+ {6, 0, 15, 15,}, /* 3: SDR12 */
+ {6, 0, 0, 0,}, /* 4: SDR25 */
+ {4, 0, 12, 0,}, /* 5: SDR50 */
+ {5, 4, 15, 0,}, /* 6: SDR104 */
+ {0}, /* 7: DDR50 */
+ {0}, /* 8: DDR52 */
+ {0}, /* 9: HS200 */
+ }
+};
+
static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
@@ -66,6 +134,10 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
if (IS_ERR(priv->reg))
priv->reg = NULL;
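+ /* The "mshc" alias index selects the hs_timing_cfg[] row
+ * used for this controller.
+ */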
+ priv->ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+ if (priv->ctrl_id < 0)
+ priv->ctrl_id = 0;
+
host->priv = priv;
return 0;
}
@@ -144,7 +216,245 @@ static const struct dw_mci_drv_data hi6220_data = {
.execute_tuning = dw_mci_hi6220_execute_tuning,
};
+static void dw_mci_hs_set_timing(struct dw_mci *host, int timing,
+ int smpl_phase)
+{
+ u32 drv_phase;
+ u32 smpl_dly;
+ u32 use_smpl_dly = 0;
+ u32 enable_shift = 0;
+ u32 reg_value;
+ int ctrl_id;
+ struct k3_priv *priv;
+
+ priv = host->priv;
+ ctrl_id = priv->ctrl_id;
+
+ drv_phase = hs_timing_cfg[ctrl_id][timing].drv_phase;
+ smpl_dly = hs_timing_cfg[ctrl_id][timing].smpl_dly;
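+ /* smpl_phase == -1 means no tuning result yet: default to the
+ * midpoint of the allowed sample phase range.
+ */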
+ if (smpl_phase == -1)
+ smpl_phase = (hs_timing_cfg[ctrl_id][timing].smpl_phase_max +
+ hs_timing_cfg[ctrl_id][timing].smpl_phase_min) / 2;
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR104:
+ if (smpl_phase >= USE_DLY_MIN_SMPL &&
+ smpl_phase <= USE_DLY_MAX_SMPL)
+ use_smpl_dly = 1;
+ /* fallthrough */
+ case MMC_TIMING_UHS_SDR50:
+ if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL &&
+ smpl_phase <= ENABLE_SHIFT_MAX_SMPL)
+ enable_shift = 1;
+ break;
+ }
+
+ mci_writel(host, GPIO, 0x0);
+ usleep_range(5, 10);
+
+ reg_value = FIELD_PREP(UHS_REG_EXT_SAMPLE_PHASE_MASK, smpl_phase) |
+ FIELD_PREP(UHS_REG_EXT_SAMPLE_DLY_MASK, smpl_dly) |
+ FIELD_PREP(UHS_REG_EXT_SAMPLE_DRVPHASE_MASK, drv_phase);
+ mci_writel(host, UHS_REG_EXT, reg_value);
+
+ mci_writel(host, ENABLE_SHIFT, enable_shift);
+
+ reg_value = FIELD_PREP(GPIO_CLK_DIV_MASK, GENCLK_DIV) |
+ FIELD_PREP(GPIO_USE_SAMPLE_DLY_MASK, use_smpl_dly);
+ mci_writel(host, GPIO, (unsigned int)reg_value | GPIO_CLK_ENABLE);
+
+ /* We should delay 1ms wait for timing setting finished. */
+ usleep_range(1000, 2000);
+}
+
+static int dw_mci_hi3660_init(struct dw_mci *host)
+{
+ mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(SDCARD_RD_THRESHOLD,
+ SDMMC_CARD_RD_THR_EN));
+
+ dw_mci_hs_set_timing(host, MMC_TIMING_LEGACY, -1);
+ host->bus_hz /= (GENCLK_DIV + 1);
+
+ return 0;
+}
+
+static int dw_mci_set_sel18(struct dw_mci *host, bool set)
+{
+ int ret;
+ unsigned int val;
+ struct k3_priv *priv;
+
+ priv = host->priv;
+
+ val = set ? SDCARD_IO_SEL18 : 0;
+ ret = regmap_update_bits(priv->reg, SOC_SCTRL_SCPERCTRL5,
+ SDCARD_IO_SEL18, val);
+ if (ret) {
+ dev_err(host->dev, "sel18 %u error\n", val);
+ return ret;
+ }
+
+ return 0;
+}
+
+void dw_mci_hi3660_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+ unsigned long wanted;
+ unsigned long actual;
+ struct k3_priv *priv = host->priv;
+
+ if (!ios->clock || ios->clock == priv->cur_speed)
+ return;
+
+ wanted = ios->clock * (GENCLK_DIV + 1);
+ ret = clk_set_rate(host->ciu_clk, wanted);
+ if (ret) {
+ dev_err(host->dev, "failed to set rate %luHz\n", wanted);
+ return;
+ }
+ actual = clk_get_rate(host->ciu_clk);
+
+ dw_mci_hs_set_timing(host, ios->timing, -1);
+ host->bus_hz = actual / (GENCLK_DIV + 1);
+ host->current_speed = 0;
+ priv->cur_speed = host->bus_hz;
+}
+
+static int dw_mci_get_best_clksmpl(unsigned int sample_flag)
+{
+ int i;
+ int interval;
+ unsigned int v;
+ unsigned int len;
+ unsigned int range_start = 0;
+ unsigned int range_length = 0;
+ unsigned int middle_range = 0;
+
+ if (!sample_flag)
+ return -EIO;
+
+ if (~sample_flag == 0)
+ return 0;
+
+ i = ffs(sample_flag) - 1;
+
+ /* A clock cycle is divided into 32 phases, each represented
+ * by one bit of sample_flag.
+ */
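+ /* Find the longest run of consecutive passing phases (ror32() lets
+ * the run wrap from bit 31 to bit 0) and return its midpoint, which
+ * gives the largest timing margin on both sides.
+ */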
+ while (i < 32) {
+ v = ror32(sample_flag, i);
+ len = ffs(~v) - 1;
+
+ if (len > range_length) {
+ range_length = len;
+ range_start = i;
+ }
+
+ interval = ffs(v >> len) - 1;
+ if (interval < 0)
+ break;
+
+ i += len + interval;
+ }
+
+ middle_range = range_start + range_length / 2;
+ if (middle_range >= 32)
+ middle_range %= 32;
+
+ return middle_range;
+}
+
+static int dw_mci_hi3660_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ int i = 0;
+ struct dw_mci *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int smpl_phase = 0;
+ u32 tuning_sample_flag = 0;
+ int best_clksmpl = 0;
+
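+ /* Sweep the 32 sample phases (NUM_PHASES > 32 simply re-checks the
+ * first few) and record which phases pass tuning.
+ */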
+ for (i = 0; i < NUM_PHASES; ++i, ++smpl_phase) {
+ smpl_phase %= 32;
+
+ mci_writel(host, TMOUT, ~0);
+ dw_mci_hs_set_timing(host, mmc->ios.timing, smpl_phase);
+
+ if (!mmc_send_tuning(mmc, opcode, NULL))
+ tuning_sample_flag |= (1 << smpl_phase);
+ else
+ tuning_sample_flag &= ~(1 << smpl_phase);
+ }
+
+ best_clksmpl = dw_mci_get_best_clksmpl(tuning_sample_flag);
+ if (best_clksmpl < 0) {
+ dev_err(host->dev, "All phases bad!\n");
+ return -EIO;
+ }
+
+ dw_mci_hs_set_timing(host, mmc->ios.timing, best_clksmpl);
+
+ dev_info(host->dev, "tuning ok best_clksmpl %u tuning_sample_flag %x\n",
+ best_clksmpl, tuning_sample_flag);
+ return 0;
+}
+
+static int dw_mci_hi3660_switch_voltage(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ int ret;
+ int min_uv = 0;
+ int max_uv = 0;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct k3_priv *priv;
+ struct dw_mci *host;
+
+ host = slot->host;
+ priv = host->priv;
+
+ if (!priv || !priv->reg)
+ return 0;
+
+ if (priv->ctrl_id == DWMMC_SDIO_ID)
+ return 0;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ ret = dw_mci_set_sel18(host, 0);
+ if (ret)
+ return ret;
+ min_uv = 2950000;
+ max_uv = 2950000;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = dw_mci_set_sel18(host, 1);
+ if (ret)
+ return ret;
+ min_uv = 1800000;
+ max_uv = 1800000;
+ }
+
+ if (IS_ERR_OR_NULL(mmc->supply.vqmmc))
+ return 0;
+
+ ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ if (ret) {
+ dev_err(host->dev, "Regulator set error %d: %d - %d\n",
+ ret, min_uv, max_uv);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dw_mci_drv_data hi3660_data = {
+ .init = dw_mci_hi3660_init,
+ .set_ios = dw_mci_hi3660_set_ios,
+ .parse_dt = dw_mci_hi6220_parse_dt,
+ .execute_tuning = dw_mci_hi3660_execute_tuning,
+ .switch_voltage = dw_mci_hi3660_switch_voltage,
+};
+
static const struct of_device_id dw_mci_k3_match[] = {
+ { .compatible = "hisilicon,hi3660-dw-mshc", .data = &hi3660_data, },
{ .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, },
{ .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, },
{},
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index f81f4175f49a..612938e6e60d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3060,6 +3060,12 @@ int dw_mci_probe(struct dw_mci *host)
goto err_clk_ciu;
}
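+ /*
+ * Pulse the optional reset line before drv_data->init() so that
+ * registers written by the platform init hook (e.g. the hi3660
+ * timing setup) are not wiped by a later reset.
+ */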
+ if (!IS_ERR(host->pdata->rstc)) {
+ reset_control_assert(host->pdata->rstc);
+ usleep_range(10, 50);
+ reset_control_deassert(host->pdata->rstc);
+ }
+
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
if (ret) {
@@ -3069,12 +3075,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (!IS_ERR(host->pdata->rstc)) {
- reset_control_assert(host->pdata->rstc);
- usleep_range(10, 50);
- reset_control_deassert(host->pdata->rstc);
- }
-
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index e8cd2dec3263..cc7224448f5d 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -56,6 +56,8 @@
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
#define SDMMC_CDTHRCTL 0x100
+#define SDMMC_UHS_REG_EXT 0x108
+#define SDMMC_ENABLE_SHIFT 0x110
#define SDMMC_DATA(x) (x)
/*
* Registers to support idmac 64-bit address mode
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ba7b034b2b91..5132e414a808 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,14 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_OVERLAY_MGR
+ bool "Enable Overlay manager"
+ default n
+ depends on OF_OVERLAY
+ help
+ Enables the overlay manager. It accepts a DT entry name from the
+ command line (overlay_mgr.overlay_dt_entry=<name>) and applies
+ all overlays in that entry to the current DT. A predefined entry
+ can also be applied on the fly via the 'current_overlay' sysfs entry.
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index d7efd9d458aa..5b07ad146ce9 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
obj-$(CONFIG_OF_NUMA) += of_numa.o
+obj-$(CONFIG_OF_OVERLAY_MGR) += overlay_mgr.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/overlay_mgr.c b/drivers/of/overlay_mgr.c
new file mode 100644
index 000000000000..73f3894a8362
--- /dev/null
+++ b/drivers/of/overlay_mgr.c
@@ -0,0 +1,152 @@
+/*
+ * Overlay manager that allows to apply list of overlays from DT entry
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static char *of_overlay_dt_entry;
+module_param_named(overlay_dt_entry, of_overlay_dt_entry, charp, 0644);
+
+static char *of_overlay_dt_apply;
+DEFINE_MUTEX(of_overlay_mgr_mutex);
+
+static int of_overlay_mgr_apply_overlay(struct device_node *onp)
+{
+ int ret;
+
+ ret = of_overlay_create(onp);
+ if (ret < 0) {
+ pr_err("overlay_mgr: fail to create overlay: %d\n", ret);
+ of_node_put(onp);
+ return ret;
+ }
+ pr_info("overlay_mgr: %s overlay applied\n", onp->name);
+ return 0;
+}
+
+static int of_overlay_mgr_apply_dt(struct device *dev, char *dt_entry)
+{
+ struct device_node *enp = dev->of_node;
+ struct device_node *next;
+ struct device_node *prev = NULL;
+
+ if (!enp) {
+ pr_err("overlay_mgr: no dt entry\n");
+ return -ENODEV;
+ }
+
+ enp = of_get_child_by_name(enp, dt_entry);
+ if (!enp) {
+ pr_err("overlay_mgr: dt entry %s not found\n", dt_entry);
+ return -ENODEV;
+ }
+ pr_info("overlay_mgr: apply %s dt entry\n", enp->name);
+ while ((next = of_get_next_available_child(enp, prev)) != NULL) {
+ if (strncmp(next->name, "overlay", 7) == 0)
+ of_overlay_mgr_apply_overlay(next);
+ prev = next;
+ }
+ return 0;
+}
+
+static ssize_t current_overlay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ size_t len;
+
+ mutex_lock(&of_overlay_mgr_mutex);
+ if (!of_overlay_dt_apply) {
+ mutex_unlock(&of_overlay_mgr_mutex);
+ return 0;
+ }
+ len = strlen(of_overlay_dt_apply);
+ if (len >= PAGE_SIZE)
+ len = PAGE_SIZE - 1;
+ memcpy(buf, of_overlay_dt_apply, len + 1);
+ mutex_unlock(&of_overlay_mgr_mutex);
+ return len;
+}
+
+static ssize_t current_overlay_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ mutex_lock(&of_overlay_mgr_mutex);
+ kfree(of_overlay_dt_apply);
+ of_overlay_dt_apply = kmalloc(size, GFP_KERNEL);
+ if (!of_overlay_dt_apply) {
+ pr_err("overlay_mgr: fail to allocate memory\n");
+ mutex_unlock(&of_overlay_mgr_mutex);
+ return 0;
+ }
+ memcpy(of_overlay_dt_apply, buf, size);
+ of_overlay_dt_apply[size - 1] = '\0';
+
+ if (of_overlay_mgr_apply_dt(dev, of_overlay_dt_apply)) {
+ kfree(of_overlay_dt_apply);
+ of_overlay_dt_apply = NULL;
+ size = 0;
+ }
+ mutex_unlock(&of_overlay_mgr_mutex);
+ return size;
+}
+
+static DEVICE_ATTR(current_overlay, 0644, current_overlay_show,
+ current_overlay_store);
+
+static int of_overlay_mgr_probe(struct platform_device *pdev)
+{
+ char *cur_entry;
+ char *next_entry;
+
+ if (device_create_file(&pdev->dev, &dev_attr_current_overlay))
+ pr_err("overlay_mgr: fail to register apply entry\n");
+
+ if (!of_overlay_dt_entry)
+ return 0;
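+ /* overlay_dt_entry may be a comma-separated list of DT entry
+ * names; apply each one in order.
+ */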
+ next_entry = of_overlay_dt_entry;
+ do {
+ cur_entry = next_entry;
+ next_entry = strchr(cur_entry, ',');
+ if (next_entry)
+ *next_entry++ = '\0';
+ of_overlay_mgr_apply_dt(&pdev->dev, cur_entry);
+ } while (next_entry);
+ return 0;
+}
+
+static const struct of_device_id of_overlay_mgr_match[] = {
+ { .compatible = "linux,overlay_manager", },
+ {}
+};
+
+static struct platform_driver of_overlay_mgr_driver = {
+ .probe = of_overlay_mgr_probe,
+ .driver = {
+ .name = "overlay_manager",
+ .of_match_table = of_match_ptr(of_overlay_mgr_match),
+ },
+};
+
+static int __init of_overlay_mgr_init(void)
+{
+ return platform_driver_register(&of_overlay_mgr_driver);
+}
+
+subsys_initcall(of_overlay_mgr_init);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d7e7c0a827c3..c991fe505cba 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -301,4 +301,13 @@ config VMD
To compile this driver as a module, choose M here: the
module will be called vmd.
+config PCIE_KIRIN
+ depends on OF && ARM64
+ depends on ARCH_HISI
+ select PCIE_DW
+ bool "Hisilicon Kirin960 PCIe controller"
+ help
+ Say Y here if you want PCIe controller support on Hisilicon
+ Kirin960 SoC
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb4983645..8799ee289d6d 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
diff --git a/drivers/pci/host/pcie-kirin.c b/drivers/pci/host/pcie-kirin.c
new file mode 100644
index 000000000000..d55b63ab0fee
--- /dev/null
+++ b/drivers/pci/host/pcie-kirin.c
@@ -0,0 +1,442 @@
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2015 Hisilicon Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "pcie-kirin.h"
+
+struct kirin_pcie *g_kirin_pcie;
+
+static int kirin_pcie_link_up(struct pcie_port *pp);
+
+static inline void kirin_elb_writel(struct kirin_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->apb_base + reg);
+}
+
+static inline u32 kirin_elb_readl(struct kirin_pcie *pcie, u32 reg)
+{
+ return readl(pcie->apb_base + reg);
+}
+
+/* Registers in PCIe PHY */
+static inline void kirin_phy_writel(struct kirin_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 kirin_phy_readl(struct kirin_pcie *pcie, u32 reg)
+{
+ return readl(pcie->phy_base + reg);
+}
+
+static int32_t kirin_pcie_get_clk(struct kirin_pcie *pcie,
+ struct platform_device *pdev)
+{
+ pcie->phy_ref_clk = devm_clk_get(&pdev->dev, "pcie_phy_ref");
+ if (IS_ERR(pcie->phy_ref_clk))
+ return PTR_ERR(pcie->phy_ref_clk);
+
+ pcie->pcie_aux_clk = devm_clk_get(&pdev->dev, "pcie_aux");
+ if (IS_ERR(pcie->pcie_aux_clk))
+ return PTR_ERR(pcie->pcie_aux_clk);
+
+ pcie->apb_phy_clk = devm_clk_get(&pdev->dev, "pcie_apb_phy");
+ if (IS_ERR(pcie->apb_phy_clk))
+ return PTR_ERR(pcie->apb_phy_clk);
+
+ pcie->apb_sys_clk = devm_clk_get(&pdev->dev, "pcie_apb_sys");
+ if (IS_ERR(pcie->apb_sys_clk))
+ return PTR_ERR(pcie->apb_sys_clk);
+
+ pcie->pcie_aclk = devm_clk_get(&pdev->dev, "pcie_aclk");
+ if (IS_ERR(pcie->pcie_aclk))
+ return PTR_ERR(pcie->pcie_aclk);
+
+ return 0;
+}
+
+static int32_t kirin_pcie_get_resource(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ struct resource *apb;
+ struct resource *phy;
+ struct resource *dbi;
+ struct kirin_pcie *pcie = to_kirin_pcie(pp);
+
+ apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+ pcie->apb_base = devm_ioremap_resource(&pdev->dev, apb);
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy);
+ if (IS_ERR(pcie->phy_base))
+ return PTR_ERR(pcie->phy_base);
+
+ dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ pcie->crgctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(pcie->crgctrl))
+ return PTR_ERR(pcie->crgctrl);
+
+ pcie->sysctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(pcie->sysctrl))
+ return PTR_ERR(pcie->sysctrl);
+
+ return 0;
+}
+
+static int kirin_pcie_phy_init(struct kirin_pcie *pcie)
+{
+ u32 reg_val;
+ u32 pipe_clk_stable = 0x1 << 19;
+ u32 time = 10;
+
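+ /* Vendor PHY bring-up sequence: clear the PHY control bits below,
+ * then poll bit 19 of register 0x400 until the PIPE clock reports
+ * stable.
+ */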
+ reg_val = kirin_phy_readl(pcie, 0x4);
+ reg_val &= ~(0x1 << 8);
+ kirin_phy_writel(pcie, reg_val, 0x4);
+
+ reg_val = kirin_phy_readl(pcie, 0x0);
+ reg_val &= ~(0x1 << 22);
+ kirin_phy_writel(pcie, reg_val, 0x0);
+ udelay(10);
+
+ reg_val = kirin_phy_readl(pcie, 0x4);
+ reg_val &= ~(0x1 << 16);
+ kirin_phy_writel(pcie, reg_val, 0x4);
+
+ reg_val = kirin_phy_readl(pcie, 0x400);
+ while (reg_val & pipe_clk_stable) {
+ udelay(100);
+ if (time == 0) {
+ dev_err(pcie->pp.dev, "PIPE clk is not stable\n");
+ return -EINVAL;
+ }
+ time--;
+ reg_val = kirin_phy_readl(pcie, 0x400);
+ }
+
+ return 0;
+}
+
+static void kirin_pcie_oe_enable(struct kirin_pcie *pcie)
+{
+ u32 val;
+
+ regmap_read(pcie->sysctrl, 0x1a4, &val);
+ val |= 0xF0F400;
+ val &= ~(0x3 << 28);
+ regmap_write(pcie->sysctrl, 0x1a4, val);
+}
+
+static int kirin_pcie_clk_ctrl(struct kirin_pcie *pcie, bool enable)
+{
+ int ret = 0;
+
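+ /* When disabling, jump straight to the unwind labels below,
+ * which double as the clock-off path.
+ */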
+ if (!enable)
+ goto close_clk;
+
+ ret = clk_set_rate(pcie->phy_ref_clk, REF_CLK_FREQ);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pcie->phy_ref_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pcie->apb_sys_clk);
+ if (ret)
+ goto apb_sys_fail;
+
+ ret = clk_prepare_enable(pcie->apb_phy_clk);
+ if (ret)
+ goto apb_phy_fail;
+
+ ret = clk_prepare_enable(pcie->pcie_aclk);
+ if (ret)
+ goto aclk_fail;
+
+ ret = clk_prepare_enable(pcie->pcie_aux_clk);
+ if (ret)
+ goto aux_clk_fail;
+
+ return 0;
+close_clk:
+ clk_disable_unprepare(pcie->pcie_aux_clk);
+aux_clk_fail:
+ clk_disable_unprepare(pcie->pcie_aclk);
+aclk_fail:
+ clk_disable_unprepare(pcie->apb_phy_clk);
+apb_phy_fail:
+ clk_disable_unprepare(pcie->apb_sys_clk);
+apb_sys_fail:
+ clk_disable_unprepare(pcie->phy_ref_clk);
+ return ret;
+}
+
+static int kirin_pcie_power_on(struct kirin_pcie *pcie)
+{
+ int ret;
+
+ /* Power supply for Host */
+ regmap_write(pcie->sysctrl, 0x60, 0x10);
+ udelay(100);
+ kirin_pcie_oe_enable(pcie);
+
+ ret = kirin_pcie_clk_ctrl(pcie, true);
+ if (ret)
+ return ret;
+
+ /*deasset PCIeCtrl&PCIePHY*/
+ regmap_write(pcie->sysctrl, 0x44, 0x30);
+ regmap_write(pcie->crgctrl, 0x88, 0x8c000000);
+ regmap_write(pcie->sysctrl, 0x190, 0x184000);
+
+ ret = kirin_pcie_phy_init(pcie);
+ if (ret)
+ goto close_clk;
+
+ /* perst assert */
+ mdelay(20);
+ if (!gpio_request(pcie->gpio_id_reset, "pcie_perst")) {
+ ret = gpio_direction_output(pcie->gpio_id_reset, 1);
+ if (ret)
+ goto close_clk;
+ mdelay(10);
+
+ return 0;
+ }
+
+close_clk:
+ kirin_pcie_clk_ctrl(pcie, false);
+ return -1;
+}
+
+static void kirin_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct kirin_pcie *pcie = to_kirin_pcie(pp);
+
+ val = kirin_elb_readl(pcie, SOC_PCIECTRL_CTRL0_ADDR);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ kirin_elb_writel(pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct kirin_pcie *pcie = to_kirin_pcie(pp);
+
+ val = kirin_elb_readl(pcie, SOC_PCIECTRL_CTRL1_ADDR);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ kirin_elb_writel(pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+}
+
+static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+ int where, int size, u32 *val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+
+ kirin_pcie_sideband_dbi_r_mode(pp, true);
+
+ *val = readl(pp->dbi_base + (where & ~0x3));
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+
+ /* always restore normal (non-DBI) access before returning */
+ kirin_pcie_sideband_dbi_r_mode(pp, false);
+
+ return ret;
+}
+
+static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+ int where, int size, u32 val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+
+ kirin_pcie_sideband_dbi_w_mode(pp, true);
+
+ if (size == 4)
+ writel(val, pp->dbi_base + (where & ~0x3));
+ else if (size == 2)
+ writew(val, pp->dbi_base + (where & ~0x3) + (where & 2));
+ else if (size == 1)
+ writeb(val, pp->dbi_base + (where & ~0x3) + (where & 3));
+ else
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+
+ /* always restore normal (non-DBI) access before returning */
+ kirin_pcie_sideband_dbi_w_mode(pp, false);
+
+ return ret;
+}
+
+static u32 kirin_pcie_readl_rc(struct pcie_port *pp, u32 reg)
+{
+ u32 val;
+
+ kirin_pcie_sideband_dbi_r_mode(pp, true);
+ val = readl(pp->dbi_base + reg);
+ kirin_pcie_sideband_dbi_r_mode(pp, false);
+
+ return val;
+}
+
+static void kirin_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
+{
+ kirin_pcie_sideband_dbi_w_mode(pp, true);
+ writel(val, pp->dbi_base + reg);
+ kirin_pcie_sideband_dbi_w_mode(pp, false);
+}
+
+static int kirin_pcie_link_up(struct pcie_port *pp)
+{
+ struct kirin_pcie *pcie = to_kirin_pcie(pp);
+ u32 val = kirin_elb_readl(pcie, PCIE_ELBI_RDLH_LINKUP);
+
+ if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static int kirin_pcie_establish_link(struct pcie_port *pp)
+{
+ int count = 0;
+
+ struct kirin_pcie *pcie = to_kirin_pcie(pp);
+
+ if (kirin_pcie_link_up(pp))
+ return 0;
+
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ kirin_elb_writel(pcie, PCIE_LTSSM_ENABLE_BIT,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!kirin_pcie_link_up(pp)) {
+ mdelay(1);
+ count++;
+ if (count == 1000) {
+ dev_err(pp->dev, "Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void kirin_pcie_host_init(struct pcie_port *pp)
+{
+ kirin_pcie_establish_link(pp);
+}
+
+static struct pcie_host_ops kirin_pcie_host_ops = {
+ .readl_rc = kirin_pcie_readl_rc,
+ .writel_rc = kirin_pcie_writel_rc,
+ .rd_own_conf = kirin_pcie_rd_own_conf,
+ .wr_own_conf = kirin_pcie_wr_own_conf,
+ .link_up = kirin_pcie_link_up,
+ .host_init = kirin_pcie_host_init,
+};
+
+static int __init kirin_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &kirin_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+
+ return ret;
+}
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+ struct kirin_pcie *pcie;
+ struct pcie_port *pp;
+ int ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "NULL node\n");
+ return -EINVAL;
+ }
+
+ pcie = devm_kzalloc(&pdev->dev,
+ sizeof(struct kirin_pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pp = &pcie->pp;
+ g_kirin_pcie = pcie;
+ pp->dev = &pdev->dev;
+
+ ret = kirin_pcie_get_clk(pcie, pdev);
+ if (ret != 0)
+ return -ENODEV;
+
+ ret = kirin_pcie_get_resource(pp, pdev);
+ if (ret != 0)
+ return -ENODEV;
+
+ pcie->gpio_id_reset = of_get_named_gpio(pdev->dev.of_node,
+ "reset-gpio", 0);
+ if (pcie->gpio_id_reset < 0)
+ return -ENODEV;
+
+ ret = kirin_pcie_power_on(pcie);
+ if (ret)
+ return ret;
+
+ ret = kirin_add_pcie_port(pp, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ dev_dbg(&pdev->dev, "probe Done\n");
+ return 0;
+}
+
+static const struct of_device_id kirin_pcie_match[] = {
+ { .compatible = "hisilicon,kirin-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, kirin_pcie_match);
+
+struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+ .driver = {
+ .name = "Kirin-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = kirin_pcie_match,
+ },
+};
+
+module_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/host/pcie-kirin.h b/drivers/pci/host/pcie-kirin.h
new file mode 100644
index 000000000000..c26102eba425
--- /dev/null
+++ b/drivers/pci/host/pcie-kirin.h
@@ -0,0 +1,71 @@
+/*
+ * PCIe host controller driver for Kirin 960 SoCs
+ *
+ * Copyright (C) 2015 Huawei Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PCIE_KIRIN_H
+#define _PCIE_KIRIN_H
+
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/compiler.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/pci_regs.h>
+
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) container_of(x, struct kirin_pcie, pp)
+
+#define REF_CLK_FREQ 100000000
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR 0x000
+#define SOC_PCIECTRL_CTRL1_ADDR 0x004
+#define SOC_PCIEPHY_CTRL2_ADDR 0x008
+#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+#define PCIE_APP_LTSSM_ENABLE 0x01c
+#define PCIE_ELBI_RDLH_LINKUP 0x400
+#define PCIE_LINKUP_ENABLE (0x8020)
+#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
+
+struct kirin_pcie {
+ void __iomem *apb_base;
+ void __iomem *phy_base;
+ struct regmap *crgctrl;
+ struct regmap *sysctrl;
+ struct clk *apb_sys_clk;
+ struct clk *apb_phy_clk;
+ struct clk *phy_ref_clk;
+ struct clk *pcie_aclk;
+ struct clk *pcie_aux_clk;
+ int gpio_id_reset;
+ struct pcie_port pp;
+};
+
+#endif
+
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index c11db8bceea1..6bfb6fb8637a 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -8,3 +8,5 @@ endif
source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/hisi/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index ca2692510733..57bc1d19ab52 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
+obj-$(CONFIG_ARCH_HISI) += hisi/
diff --git a/drivers/platform/hisi/Kconfig b/drivers/platform/hisi/Kconfig
new file mode 100644
index 000000000000..776cfe0cc2af
--- /dev/null
+++ b/drivers/platform/hisi/Kconfig
@@ -0,0 +1,6 @@
+config HISI_FIQ_DEBUGGER
+ bool "Enable the FIQ serial debugger on Hisi"
+ default n
+ select FIQ_DEBUGGER
+ help
+ Enables the FIQ serial debugger on Hisi
diff --git a/drivers/platform/hisi/Makefile b/drivers/platform/hisi/Makefile
new file mode 100644
index 000000000000..483c227b18bc
--- /dev/null
+++ b/drivers/platform/hisi/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_HISI_FIQ_DEBUGGER) += hi6220_fiq_debugger.o
+
diff --git a/drivers/platform/hisi/hi6220_fiq_debugger.c b/drivers/platform/hisi/hi6220_fiq_debugger.c
new file mode 100644
index 000000000000..7953d10fd1d6
--- /dev/null
+++ b/drivers/platform/hisi/hi6220_fiq_debugger.c
@@ -0,0 +1,312 @@
+/*
+ * platform/hisilicon/hi6220_fiq_debugger.c
+ *
+ * Serial Debugger Interface for Hi6220
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+#include <linux/amba/serial.h>
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/smcall.h>
+
+#include "../../staging/android/fiq_debugger/fiq_debugger.h"
+
+struct hi6220_fiq_debugger {
+ struct fiq_debugger_pdata pdata;
+ void __iomem *debug_port_base;
+ struct device *trusty_dev;
+ int lfiq;
+ int tfiq;
+};
+
+static inline void uart_write(struct hi6220_fiq_debugger *t,
+ unsigned int val, unsigned int off)
+{
+ __raw_writew(val, t->debug_port_base + off);
+}
+
+static inline unsigned int uart_read(struct hi6220_fiq_debugger *t,
+ unsigned int off)
+{
+ return __raw_readw(t->debug_port_base + off);
+}
+
+static int debug_port_init(struct platform_device *pdev)
+{
+ struct hi6220_fiq_debugger *t =
+ container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ uart_write(t, UART011_OEIS | UART011_BEIS | UART011_PEIS |
+ UART011_FEIS | UART011_RTIS | UART011_RXIS, UART011_ICR);
+ uart_write(t, UART011_RTIM | UART011_RXIM, UART011_IMSC);
+
+ /* uart_write(t, 0x1b, UART011_FBRD); */
+
+ return 0;
+}
+
+static int debug_getc(struct platform_device *pdev)
+{
+ int ch;
+ struct hi6220_fiq_debugger *t =
+ container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ if (uart_read(t, UART01x_FR) & UART01x_FR_RXFE)
+ return FIQ_DEBUGGER_NO_CHAR;
+ ch = uart_read(t, UART01x_DR);
+ if (ch & UART011_DR_BE)
+ return FIQ_DEBUGGER_BREAK;
+ return ch & 0x00FF;
+}
+
+static void debug_putc(struct platform_device *pdev, unsigned int c)
+{
+ struct hi6220_fiq_debugger *t =
+ container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (uart_read(t, UART01x_FR) & UART01x_FR_TXFF)
+ cpu_relax();
+ uart_write(t, c, UART01x_DR);
+ while (uart_read(t, UART01x_FR) & UART01x_FR_BUSY)
+ cpu_relax();
+}
+
+static void debug_flush(struct platform_device *pdev)
+{
+ struct hi6220_fiq_debugger *t =
+ container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (uart_read(t, UART01x_FR) & UART01x_FR_BUSY)
+ cpu_relax();
+}
+
+static int debug_suspend(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int debug_resume(struct platform_device *pdev)
+{
+ return debug_port_init(pdev);
+}
+
+static void trusty_fiq_enable(struct platform_device *pdev,
+ unsigned int fiq, bool enable)
+{
+ int ret;
+ struct hi6220_fiq_debugger *t;
+
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ if (fiq != t->lfiq) {
+ dev_err(&pdev->dev, "unexpected virtual fiq, %d != %d\n",
+ fiq, t->lfiq);
+ return;
+ }
+
+ ret = trusty_fast_call32(t->trusty_dev, SMC_FC_REQUEST_FIQ,
+ t->tfiq, enable, 0);
+ if (ret)
+ dev_err(&pdev->dev, "SMC_FC_REQUEST_FIQ failed: %d\n", ret);
+}
+
+static int hi6220_fiq_debugger_id;
+
+static int __hi6220_serial_debug_init(unsigned int base,
+ int lfiq, int tfiq, int irq,
+ struct clk *clk,
+ int signal_irq, int wakeup_irq,
+ struct device *trusty_dev)
+{
+ struct hi6220_fiq_debugger *t;
+ struct platform_device *pdev;
+ struct resource *res;
+ int res_count = 1;
+ int ret = -ENOMEM;
+
+ t = kzalloc(sizeof(struct hi6220_fiq_debugger), GFP_KERNEL);
+ if (!t) {
+ pr_err("Failed to allocate for fiq debugger\n");
+ return ret;
+ }
+
+ t->pdata.uart_init = debug_port_init;
+ t->pdata.uart_getc = debug_getc;
+ t->pdata.uart_putc = debug_putc;
+ t->pdata.uart_flush = debug_flush;
+ t->pdata.uart_dev_suspend = debug_suspend;
+ t->pdata.uart_dev_resume = debug_resume;
+
+ if (trusty_dev)
+ t->pdata.fiq_enable = trusty_fiq_enable;
+ t->trusty_dev = trusty_dev;
+
+ t->debug_port_base = ioremap(base, PAGE_SIZE);
+ if (!t->debug_port_base) {
+ pr_err("Failed to ioremap for fiq debugger\n");
+ goto out1;
+ }
+
+ res = kzalloc(sizeof(struct resource) * 3, GFP_KERNEL);
+ if (!res) {
+ pr_err("Failed to alloc fiq debugger resources\n");
+ goto out2;
+ }
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!pdev) {
+ pr_err("Failed to alloc fiq debugger platform device\n");
+ goto out3;
+ }
+
+ if (lfiq >= 0) {
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].start = lfiq;
+ res[0].end = lfiq;
+ res[0].name = "fiq";
+ t->lfiq = lfiq;
+ t->tfiq = tfiq;
+ } else {
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].start = irq;
+ res[0].end = irq;
+ res[0].name = "uart_irq";
+ }
+
+ if (signal_irq >= 0) {
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = signal_irq;
+ res[1].end = signal_irq;
+ res[1].name = "signal";
+ res_count++;
+ }
+
+ if (wakeup_irq >= 0) {
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = wakeup_irq;
+ res[2].end = wakeup_irq;
+ res[2].name = "wakeup";
+ res_count++;
+ }
+
+ pdev->name = "fiq_debugger";
+ pdev->id = hi6220_fiq_debugger_id++;
+ pdev->dev.platform_data = &t->pdata;
+ pdev->resource = res;
+ pdev->num_resources = res_count;
+
+ ret = platform_device_register(pdev);
+ if (ret) {
+ pr_err("Failed to register fiq debugger\n");
+ goto out4;
+ }
+
+ return 0;
+
+out4:
+ kfree(pdev);
+out3:
+ kfree(res);
+out2:
+ iounmap(t->debug_port_base);
+out1:
+ kfree(t);
+ return ret;
+}
+
+#define HI6220_SERIAL_DEBUG_MODE_IRQ ((void *)0)
+#define HI6220_SERIAL_DEBUG_MODE_FIQ ((void *)1)
+
+static const struct of_device_id hi6220_serial_debug_match[] = {
+ {
+ .compatible = "android,irq-hi6220-uart",
+ .data = HI6220_SERIAL_DEBUG_MODE_IRQ,
+ },
+ {
+ .compatible = "android,fiq-hi6220-uart",
+ .data = HI6220_SERIAL_DEBUG_MODE_FIQ,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hi6220_serial_debug_match);
+
+static int hi6220_serial_debug_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int fiq;
+ int lfiq = -1;
+ int tfiq = -1;
+ int irq = -1;
+ int signal_irq;
+ int wakeup_irq;
+ const struct of_device_id *of_id;
+ struct device *trusty_dev = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No mem resource\n");
+ return -EINVAL;
+ }
+
+ fiq = platform_get_irq_byname(pdev, "fiq");
+ if (fiq < 0) {
+ dev_err(&pdev->dev, "No IRQ for fiq, error=%d\n", fiq);
+ return fiq;
+ }
+
+ signal_irq = platform_get_irq_byname(pdev, "signal");
+ if (signal_irq < 0) {
+ dev_err(&pdev->dev, "No signal IRQ, error=%d\n", signal_irq);
+ signal_irq = -1;
+ }
+
+ wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+ if (wakeup_irq < 0) {
+ dev_err(&pdev->dev, "No wakeup IRQ, error=%d\n", wakeup_irq);
+ wakeup_irq = -1;
+ }
+
+ of_id = of_match_node(hi6220_serial_debug_match, pdev->dev.of_node);
+ if (of_id->data == HI6220_SERIAL_DEBUG_MODE_FIQ) {
+ trusty_dev = pdev->dev.parent->parent;
+ lfiq = fiq;
+ tfiq = irqd_to_hwirq(irq_get_irq_data(fiq));
+ } else {
+ irq = fiq;
+ }
+
+ return __hi6220_serial_debug_init(res->start, lfiq, tfiq, irq, NULL,
+ signal_irq, wakeup_irq, trusty_dev);
+}
+
+static struct platform_driver hi6220_serial_debug_driver = {
+ .probe = hi6220_serial_debug_probe,
+ .driver = {
+ .name = "hi6220-serial-debug",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(hi6220_serial_debug_match),
+ },
+};
+
+module_platform_driver(hi6220_serial_debug_driver);
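For reference, the child "fiq_debugger" device above is only ever handed &t->pdata, and trusty_fiq_enable() recovers the enclosing struct hi6220_fiq_debugger with container_of(). A minimal, self-contained sketch of that platform-data/container_of() idiom follows; every name in it is hypothetical and only illustrates the pattern, it is not part of the patch.

#include <linux/kernel.h>
#include <linux/platform_device.h>

/* pdata handed to the child device; embedded in the wrapper below */
struct child_pdata {
	void (*kick)(struct platform_device *pdev);
};

struct wrapper {
	struct child_pdata pdata;	/* pdev->dev.platform_data points here */
	int private_state;
};

static void wrapper_kick(struct platform_device *pdev)
{
	/* step back from the embedded pdata to the enclosing wrapper */
	struct wrapper *w = container_of(dev_get_platdata(&pdev->dev),
					 struct wrapper, pdata);

	w->private_state++;
}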
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
index f69387e12c1e..17e5dd17840f 100644
--- a/drivers/power/reset/hisi-reboot.c
+++ b/drivers/power/reset/hisi-reboot.c
@@ -11,23 +11,61 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <asm/system_misc.h>
#include <asm/proc-fns.h>
static void __iomem *base;
static u32 reboot_offset;
+static struct regmap *pmu_regmap;
+static struct regmap *sctrl_regmap;
+
+#define REBOOT_REASON_BOOTLOADER (0x01)
+#define REBOOT_REASON_COLDBOOT (0x00)
+#define DDR_BYPASS BIT(31)
+
+#define RST_FLAG_MASK GENMASK(7, 0)
+
+#define PMU_HRST_OFFSET ((0x101) << 2)
+#define SCPEREN1_OFFSET (0x170)
static int hisi_restart_handler(struct notifier_block *this,
unsigned long mode, void *cmd)
{
- writel_relaxed(0xdeadbeef, base + reboot_offset);
+ int ret;
+ char reboot_reason;
+
+ if (!cmd || !strcmp(cmd, "bootloader"))
+ reboot_reason = REBOOT_REASON_BOOTLOADER;
+ else
+ reboot_reason = REBOOT_REASON_COLDBOOT;
+
+ if (base) {
+ writel_relaxed(0xdeadbeef, base + reboot_offset);
+ } else {
+ ret = regmap_update_bits(pmu_regmap, PMU_HRST_OFFSET,
+ RST_FLAG_MASK, reboot_reason);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(sctrl_regmap, SCPEREN1_OFFSET, DDR_BYPASS);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(sctrl_regmap, reboot_offset, 0xdeadbeef);
+ if (ret)
+ return ret;
+ }
while (1)
cpu_do_idle();
@@ -47,8 +85,17 @@ static int hisi_reboot_probe(struct platform_device *pdev)
base = of_iomap(np, 0);
if (!base) {
- WARN(1, "failed to map base address");
- return -ENODEV;
+ pmu_regmap = syscon_regmap_lookup_by_phandle(np, "pmu-regmap");
+ if (IS_ERR(pmu_regmap)) {
+ WARN(1, "failed to get pmu regmap");
+ return PTR_ERR(pmu_regmap);
+ }
+
+ sctrl_regmap = syscon_regmap_lookup_by_phandle(np, "sctrl-regmap");
+ if (IS_ERR(sctrl_regmap)) {
+ WARN(1, "failed to get sctrl regmap");
+ return PTR_ERR(sctrl_regmap);
+ }
}
if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
@@ -64,11 +111,13 @@ static int hisi_reboot_probe(struct platform_device *pdev)
iounmap(base);
}
+ arm_pm_restart = NULL;
return err;
}
static const struct of_device_id hisi_reboot_of_match[] = {
{ .compatible = "hisilicon,sysctrl" },
+ { .compatible = "hisilicon,hi3660-reboot" },
{}
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 936f7ccc9736..60fc57c1960f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -270,6 +270,16 @@ config REGULATOR_HI6421
21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All
of them come with support to either ECO (idle) or sleep mode.
+config REGULATOR_HI6421V530
+ tristate "HiSilicon Hi6421v530 PMIC voltage regulator support"
+ depends on MFD_HI6421_PMIC && OF
+ help
+ This driver provides support for the voltage regulators on
+ HiSilicon Hi6421v530 PMU / Codec IC.
+ Hi6421v530 is a multi-function device which, on regulator part,
+ provides 5 general purpose LDOs, and all of them come with support
+ to either ECO (idle) or sleep mode.
+
config REGULATOR_HI655X
tristate "Hisilicon HI655X PMIC regulators support"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2142a5d3fc08..384c5ce5a1f2 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
+obj-$(CONFIG_REGULATOR_HI6421V530) += hi6421v530-regulator.o
obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 62c5f5445d44..259c3a865ac6 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -621,7 +621,14 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id hi6421_regulator_table[] = {
+ { .name = "hi6421-regulator" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, hi6421_regulator_table);
+
static struct platform_driver hi6421_regulator_driver = {
+ .id_table = hi6421_regulator_table,
.driver = {
.name = "hi6421-regulator",
},
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
new file mode 100644
index 000000000000..c09bc71538a5
--- /dev/null
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -0,0 +1,214 @@
+/*
+ * Device driver for regulators in Hi6421V530 IC
+ *
+ * Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ * Copyright (c) <2017> Linaro Ltd.
+ * http://www.linaro.org
+ *
+ * Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
+ * Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/hi6421-pmic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+/**
+ * struct hi6421v530_regulator_info - hi6421v530 regulator information
+ * @rdesc: regulator description
+ * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this is the sleep-mode mask
+ * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
+ */
+struct hi6421v530_regulator_info {
+ struct regulator_desc rdesc;
+ u8 mode_mask;
+ u32 eco_microamp;
+};
+
+/* HI6421v530 regulators */
+enum hi6421v530_regulator_id {
+ HI6421V530_LDO3,
+ HI6421V530_LDO9,
+ HI6421V530_LDO11,
+ HI6421V530_LDO15,
+ HI6421V530_LDO16,
+};
+
+static const unsigned int ldo_3_voltages[] = {
+ 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 1975000,
+ 2000000, 2025000, 2050000, 2075000,
+ 2100000, 2125000, 2150000, 2200000,
+};
+
+static const unsigned int ldo_9_11_voltages[] = {
+ 1750000, 1800000, 1825000, 2800000,
+ 2850000, 2950000, 3000000, 3300000,
+};
+
+static const unsigned int ldo_15_16_voltages[] = {
+ 1750000, 1800000, 2400000, 2600000,
+ 2700000, 2850000, 2950000, 3000000,
+};
+
+static const struct regulator_ops hi6421v530_ldo_ops;
+
+#define HI6421V530_LDO_ENABLE_TIME (350)
+
+/*
+ * _id - LDO id name string
+ * v_table - voltage table
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * ecomask - eco mode mask
+ * ecoamp - eco mode load upper limit in uA
+ */
+#define HI6421V530_LDO(_ID, v_table, vreg, vmask, ereg, emask, \
+ odelay, ecomask, ecoamp) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &hi6421v530_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421V530_##_ID, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(v_table), \
+ .volt_table = v_table, \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = HI6421V530_LDO_ENABLE_TIME, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = ecomask, \
+ .eco_microamp = ecoamp, \
+}
+
+/* HI6421V530 regulator information */
+
+static struct hi6421v530_regulator_info hi6421v530_regulator_info[] = {
+ HI6421V530_LDO(LDO3, ldo_3_voltages, 0x061, 0xf, 0x060, 0x2,
+ 20000, 0x6, 8000),
+ HI6421V530_LDO(LDO9, ldo_9_11_voltages, 0x06b, 0x7, 0x06a, 0x2,
+ 40000, 0x6, 8000),
+ HI6421V530_LDO(LDO11, ldo_9_11_voltages, 0x06f, 0x7, 0x06e, 0x2,
+ 40000, 0x6, 8000),
+ HI6421V530_LDO(LDO15, ldo_15_16_voltages, 0x077, 0x7, 0x076, 0x2,
+ 40000, 0x6, 8000),
+ HI6421V530_LDO(LDO16, ldo_15_16_voltages, 0x079, 0x7, 0x078, 0x2,
+ 40000, 0x6, 8000),
+};
+
+static unsigned int hi6421v530_regulator_ldo_get_mode(
+ struct regulator_dev *rdev)
+{
+ struct hi6421v530_regulator_info *info;
+ unsigned int reg_val;
+
+ info = rdev_get_drvdata(rdev);
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
+
+ if (reg_val & (info->mode_mask))
+ return REGULATOR_MODE_IDLE;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int hi6421v530_regulator_ldo_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct hi6421v530_regulator_info *info;
+ unsigned int new_mode;
+
+ info = rdev_get_drvdata(rdev);
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ new_mode = 0;
+ break;
+ case REGULATOR_MODE_IDLE:
+ new_mode = info->mode_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ info->mode_mask, new_mode);
+
+ return 0;
+}
+
+
+static const struct regulator_ops hi6421v530_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421v530_regulator_ldo_get_mode,
+ .set_mode = hi6421v530_regulator_ldo_set_mode,
+};
+
+static int hi6421v530_regulator_probe(struct platform_device *pdev)
+{
+ struct hi6421_pmic *pmic;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ unsigned int i;
+
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic) {
+ dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hi6421v530_regulator_info); i++) {
+ config.dev = pdev->dev.parent;
+ config.regmap = pmic->regmap;
+ config.driver_data = &hi6421v530_regulator_info[i];
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &hi6421v530_regulator_info[i].rdesc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ hi6421v530_regulator_info[i].rdesc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id hi6421v530_regulator_table[] = {
+ { .name = "hi6421v530-regulator" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, hi6421v530_regulator_table);
+
+static struct platform_driver hi6421v530_regulator_driver = {
+ .id_table = hi6421v530_regulator_table,
+ .driver = {
+ .name = "hi6421v530-regulator",
+ },
+ .probe = hi6421v530_regulator_probe,
+};
+module_platform_driver(hi6421v530_regulator_driver);
+
+MODULE_AUTHOR("Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>");
+MODULE_DESCRIPTION("Hi6421v530 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig
index 1ff8b0c80980..10134dc03fe0 100644
--- a/drivers/reset/hisilicon/Kconfig
+++ b/drivers/reset/hisilicon/Kconfig
@@ -1,3 +1,10 @@
+config COMMON_RESET_HI3660
+ tristate "Hi3660 Reset Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ Build the Hisilicon Hi3660 reset driver.
+
config COMMON_RESET_HI6220
tristate "Hi6220 Reset Driver"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/reset/hisilicon/Makefile b/drivers/reset/hisilicon/Makefile
index c932f86e2f10..ab8a7bfcbd8d 100644
--- a/drivers/reset/hisilicon/Makefile
+++ b/drivers/reset/hisilicon/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_COMMON_RESET_HI6220) += hi6220_reset.o
+obj-$(CONFIG_COMMON_RESET_HI3660) += reset-hi3660.o
diff --git a/drivers/reset/hisilicon/reset-hi3660.c b/drivers/reset/hisilicon/reset-hi3660.c
new file mode 100644
index 000000000000..17d8bb128e6e
--- /dev/null
+++ b/drivers/reset/hisilicon/reset-hi3660.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct hi3660_reset_controller {
+ struct reset_controller_dev rst;
+ struct regmap *map;
+};
+
+#define to_hi3660_reset_controller(_rst) \
+ container_of(_rst, struct hi3660_reset_controller, rst)
+
+static int hi3660_reset_program_hw(struct reset_controller_dev *rcdev,
+ unsigned long idx, bool assert)
+{
+ struct hi3660_reset_controller *rc = to_hi3660_reset_controller(rcdev);
+ unsigned int offset = idx >> 8;
+ unsigned int mask = BIT(idx & 0x1f);
+
+ if (assert)
+ return regmap_write(rc->map, offset, mask);
+ else
+ return regmap_write(rc->map, offset + 4, mask);
+}
+
+static int hi3660_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return hi3660_reset_program_hw(rcdev, idx, true);
+}
+
+static int hi3660_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return hi3660_reset_program_hw(rcdev, idx, false);
+}
+
+static int hi3660_reset_dev(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ int err;
+
+ err = hi3660_reset_assert(rcdev, idx);
+ if (err)
+ return err;
+
+ return hi3660_reset_deassert(rcdev, idx);
+}
+
+static struct reset_control_ops hi3660_reset_ops = {
+ .reset = hi3660_reset_dev,
+ .assert = hi3660_reset_assert,
+ .deassert = hi3660_reset_deassert,
+};
+
+static int hi3660_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int offset, bit;
+
+ offset = reset_spec->args[0];
+ bit = reset_spec->args[1];
+
+ return (offset << 8) | bit;
+}
+
+static int hi3660_reset_probe(struct platform_device *pdev)
+{
+ struct hi3660_reset_controller *rc;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+
+ rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ rc->map = syscon_regmap_lookup_by_phandle(np, "hisi,rst-syscon");
+ if (IS_ERR(rc->map)) {
+ dev_err(dev, "failed to get hi3660,rst-syscon\n");
+ return PTR_ERR(rc->map);
+ }
+
+ rc->rst.ops = &hi3660_reset_ops,
+ rc->rst.of_node = np;
+ rc->rst.of_reset_n_cells = 2;
+ rc->rst.of_xlate = hi3660_reset_xlate;
+
+ return reset_controller_register(&rc->rst);
+}
+
+static const struct of_device_id hi3660_reset_match[] = {
+ { .compatible = "hisilicon,hi3660-reset", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi3660_reset_match);
+
+static struct platform_driver hi3660_reset_driver = {
+ .probe = hi3660_reset_probe,
+ .driver = {
+ .name = "hi3660-reset",
+ .of_match_table = hi3660_reset_match,
+ },
+};
+
+static int __init hi3660_reset_init(void)
+{
+ return platform_driver_register(&hi3660_reset_driver);
+}
+arch_initcall(hi3660_reset_init);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hi3660-reset");
+MODULE_DESCRIPTION("HiSilicon Hi3660 Reset Driver");
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e27b4d4e6ae2..119604ea0aae 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -80,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM
If unsure, say N.
+config SCSI_UFS_HI3660
+ tristate "Hisilicon Hi3660 UFS controller platform driver"
+ depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Hisilicon HI3660 additions to the UFSHCD platform driver.
+
+ If unsure, say N.
+
config SCSI_UFS_DWC_TC_PLATFORM
tristate "DesignWare platform support using a G210 Test Chip"
depends on SCSI_UFSHCD_PLATFORM
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 6e77cb0bfee9..ae880189f018 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
+obj-$(CONFIG_SCSI_UFS_HI3660) += ufs-hi3660.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufs-hi3660.c b/drivers/scsi/ufs/ufs-hi3660.c
new file mode 100644
index 000000000000..d261dbce10d8
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hi3660.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/gpio.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-hi3660.h"
+#include "ufshci.h"
+
+static int ufs_hi3660_check_hibern8(struct ufs_hba *hba)
+{
+ int err;
+ u32 tx_fsm_val_0;
+ u32 tx_fsm_val_1;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1),
+ &tx_fsm_val_1);
+ if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 && tx_fsm_val_1 == TX_FSM_HIBERN8))
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have scheduled out for long during polling so
+ * check the state again.
+ */
+ if (time_after(jiffies, timeout)) {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1),
+ &tx_fsm_val_1);
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 || tx_fsm_val_1 != TX_FSM_HIBERN8) {
+ err = -1;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+ __func__, tx_fsm_val_0, tx_fsm_val_1);
+ }
+
+ return err;
+}
+
+static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+{
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+ mdelay(1);
+ /* use abb clk */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+ /* open mphy ref clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+{
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+ u32 reg;
+
+ if (!IS_ERR(host->rst))
+ reset_control_assert(host->rst);
+
+ /* HC_PSW powerup */
+ ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ udelay(10);
+ /* notify PWR ready */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+ UFS_DEVICE_RESET_CTRL);
+
+ if (gpio_is_valid(host->reset_gpio))
+ gpio_direction_output(host->reset_gpio, 0);
+
+ reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+ reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+ /* set cfg clk freq */
+ ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+ /* set ref clk freq */
+ ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+ /* bypass ufs clk gate */
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS, CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+ /* open psw clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+ /* disable ufshc iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+ /* disable phy iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+ /* notice iso disable */
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+ if (!IS_ERR(host->assert))
+ reset_control_deassert(host->assert);
+ /* disable lp_reset_n */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+ mdelay(1);
+
+ if (gpio_is_valid(host->reset_gpio))
+ gpio_direction_output(host->reset_gpio, 1);
+
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ UFS_DEVICE_RESET_CTRL);
+
+ mdelay(20);
+
+ /*
+ * enable the fix of linereset recovery,
+ * and enable rx_reset/tx_rest beat
+ * enable ref_clk_en override(bit5) &
+ * override value = 1(bit4), with mask
+ */
+ ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+}
+
+static int ufs_hi3660_link_startup_pre_change(struct ufs_hba *hba)
+{
+ int err;
+ uint32_t value;
+ uint32_t reg;
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+ if (host->caps & USE_RATE_B) {
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ } else {
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x1);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x0);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x4C);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x18);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x18);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xD);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xD);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4A);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4A);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4A);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4A);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+ if (value != 0x1)
+ dev_info(hba->dev,
+ "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+ err = ufs_hi3660_check_hibern8(hba);
+ if (err)
+ dev_err(hba->dev, "ufs_hi3660_check_hibern8 error\n");
+
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+ /* disable auto H8 */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_AHIT);
+ reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+ ufshcd_writel(hba, reg, REG_CONTROLLER_AHIT);
+
+ /* Unipro PA_Local_TX_LCC_Enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ /* close Unipro VS_Mk2ExtnSupport */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+ if (value != 0) {
+ /* Ensure close success */
+ dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+ }
+
+ return err;
+}
+
+static int ufs_hi3660_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro DL_AFC0CreditThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+ /* Unipro DL_TC0OutAckThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+ /* Unipro DL_TC0TXFCThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+ if (host->caps & BROKEN_CLK_GATE_BYPASS) {
+ /* not bypass ufs clk gate */
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS, CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+ }
+
+ if (host->caps & USE_AUTO_H8) {
+ /* disable power-gating in auto hibernate 8 */
+ ufshcd_rmwl(hba, LP_AH8_PGE, 0, UFS_REG_OCPTHRTL);
+
+ /* enable auto H8 */
+ ufshcd_writel(hba, UFS_AHIT_AUTOH8_TIMER, REG_CONTROLLER_AHIT);
+ }
+
+ return 0;
+}
+
+static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_hi3660_link_startup_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ err = ufs_hi3660_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+struct ufs_hi3660_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_hi3660_get_pwr_dev_param(struct ufs_hi3660_dev_params *hi3660_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_hi3660_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_hi3660_max_hs = false;
+
+ if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (hi3660_param->desired_working_mode == FAST) {
+ is_hi3660_max_hs = true;
+ min_hi3660_gear = min_t(u32, hi3660_param->hs_rx_gear,
+ hi3660_param->hs_tx_gear);
+ } else {
+ min_hi3660_gear = min_t(u32, hi3660_param->pwm_rx_gear,
+ hi3660_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but
+ * hi3660_param->desired_working_mode is HS,
+ * thus device and hi3660_param don't agree
+ */
+ if (!is_dev_sup_hs && is_hi3660_max_hs) {
+ pr_err("%s: device not support HS\n", __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_hi3660_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since hi3660_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to hi3660_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hi3660_param->rx_pwr_hs;
+ } else {
+ /*
+ * here hi3660_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases hi3660_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hi3660_param->rx_pwr_pwm;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx =
+ min_t(u32, dev_max->lane_tx, hi3660_param->tx_lanes);
+ agreed_pwr->lane_rx =
+ min_t(u32, dev_max->lane_rx, hi3660_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * if both device capabilities and vendor pre-defined preferences are
+ * both HS or both PWM then set the minimum gear to be the chosen
+ * working gear.
+ * if one is PWM and one is HS then the one that is PWM get to decide
+ * what is the gear, as it is the one that also decided previously what
+ * pwr the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_hi3660_max_hs) ||
+ (!is_dev_sup_hs && !is_hi3660_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_hi3660_gear);
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hi3660_gear;
+
+ agreed_pwr->hs_rate = hi3660_param->hs_rate;
+
+ pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, "
+ "rate = %d\n",
+ agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
+ agreed_pwr->hs_rate);
+ return 0;
+}
+
+static void ufs_hi3660_pwr_change_pre_change(struct ufs_hba *hba)
+{
+ /* update */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+ /* PA_TxSkip */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+ /* PA_PWRModeUserData0 = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+ /* PA_PWRModeUserData1 = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+ /* PA_PWRModeUserData2 = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+ /* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+ /* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+ /* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+ /* PA_PWRModeUserData3 = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+ /* PA_PWRModeUserData4 = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+ /* PA_PWRModeUserData5 = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+ /* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+ /* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+ /* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+}
+
+static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_hi3660_dev_params ufs_hi3660_cap;
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+ int ret = 0;
+ uint32_t value;
+
+ if (!dev_req_params) {
+ dev_err(hba->dev,
+ "%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (host->caps & USE_ONE_LANE) {
+ ufs_hi3660_cap.tx_lanes = 1;
+ ufs_hi3660_cap.rx_lanes = 1;
+ } else {
+ ufs_hi3660_cap.tx_lanes = 2;
+ ufs_hi3660_cap.rx_lanes = 2;
+ }
+
+ if (host->caps & USE_HS_GEAR3) {
+ ufs_hi3660_cap.hs_rx_gear = UFS_HS_G3;
+ ufs_hi3660_cap.hs_tx_gear = UFS_HS_G3;
+ ufs_hi3660_cap.desired_working_mode = FAST;
+ } else if (host->caps & USE_HS_GEAR2) {
+ ufs_hi3660_cap.hs_rx_gear = UFS_HS_G2;
+ ufs_hi3660_cap.hs_tx_gear = UFS_HS_G2;
+ ufs_hi3660_cap.desired_working_mode = FAST;
+ } else if (host->caps & USE_HS_GEAR1) {
+ ufs_hi3660_cap.hs_rx_gear = UFS_HS_G1;
+ ufs_hi3660_cap.hs_tx_gear = UFS_HS_G1;
+ ufs_hi3660_cap.desired_working_mode = FAST;
+ } else {
+ ufs_hi3660_cap.desired_working_mode = SLOW;
+ }
+
+ ufs_hi3660_cap.pwm_rx_gear = UFS_HI3660_LIMIT_PWMGEAR_RX;
+ ufs_hi3660_cap.pwm_tx_gear = UFS_HI3660_LIMIT_PWMGEAR_TX;
+ ufs_hi3660_cap.rx_pwr_pwm = UFS_HI3660_LIMIT_RX_PWR_PWM;
+ ufs_hi3660_cap.tx_pwr_pwm = UFS_HI3660_LIMIT_TX_PWR_PWM;
+ /* Hynix devices do not support FASTAUTO yet */
+ if (host->caps & BROKEN_FASTAUTO) {
+ ufs_hi3660_cap.rx_pwr_hs = FAST_MODE;
+ ufs_hi3660_cap.tx_pwr_hs = FAST_MODE;
+ } else {
+ ufs_hi3660_cap.rx_pwr_hs = FASTAUTO_MODE;
+ ufs_hi3660_cap.tx_pwr_hs = FASTAUTO_MODE;
+ }
+
+ if (host->caps & USE_RATE_B)
+ ufs_hi3660_cap.hs_rate = PA_HS_MODE_B;
+ else
+ ufs_hi3660_cap.hs_rate = PA_HS_MODE_A;
+
+ ret = ufs_hi3660_get_pwr_dev_param(
+ &ufs_hi3660_cap, dev_max_params, dev_req_params);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ dev_info(hba->dev, "set TX_EQUALIZER 3.5db\n");
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0037, 0x0), 0x1);
+ if ((dev_req_params->lane_tx > 1) && (dev_req_params->lane_rx > 1))
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0037, 0x1), 0x1);
+
+ ufs_hi3660_pwr_change_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x0037, 0x0), &value);
+ dev_info(hba->dev, "check TX_EQUALIZER DB value lane0 = 0x%x\n", value);
+ if ((dev_req_params->lane_tx > 1) && (dev_req_params->lane_rx > 1)) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x0037, 0x1), &value);
+ dev_info(hba->dev, "check TX_EQUALIZER DB value lane1 = 0x%x\n", value);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_hi3660_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_runtime_pm(pm_op))
+ return 0;
+
+ if (host->in_suspend) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ udelay(10);
+ /* set ref_dig_clk override of PHY PCS to 0 */
+ ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
+
+ host->in_suspend = true;
+
+ return 0;
+}
+
+static int ufs_hi3660_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hi3660_host *host = ufshcd_get_variant(hba);
+
+ if (!host->in_suspend)
+ return 0;
+
+ /* set ref_dig_clk override of PHY PCS to 1 */
+ ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
+ udelay(10);
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+
+ host->in_suspend = false;
+ return 0;
+}
+
+static int ufs_hi3660_get_resource(struct ufs_hi3660_host *host)
+{
+ struct resource *mem_res;
+ struct device *dev = host->hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* get resource of ufs sys ctrl */
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+ if (!host->ufs_sys_ctrl) {
+ dev_err(dev, "cannot ioremap for ufs sys ctrl register\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ufs_hi3660_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_1;
+ hba->spm_lvl = UFS_PM_LVL_3;
+}
+
+static void ufs_hi3660_populate_dt(struct device *dev,
+ struct ufs_hi3660_host *host)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "can not find device node\n");
+ return;
+ }
+
+ if (of_find_property(np, "ufs-hi3660-use-rate-B", NULL))
+ host->caps |= USE_RATE_B;
+
+ if (of_find_property(np, "ufs-hi3660-broken-fastauto", NULL))
+ host->caps |= BROKEN_FASTAUTO;
+
+ if (of_find_property(np, "ufs-hi3660-use-one-line", NULL))
+ host->caps |= USE_ONE_LANE;
+
+ if (of_find_property(np, "ufs-hi3660-use-HS-GEAR3", NULL))
+ host->caps |= USE_HS_GEAR3;
+
+ if (of_find_property(np, "ufs-hi3660-use-HS-GEAR2", NULL))
+ host->caps |= USE_HS_GEAR2;
+
+ if (of_find_property(np, "ufs-hi3660-use-HS-GEAR1", NULL))
+ host->caps |= USE_HS_GEAR1;
+
+ if (of_find_property(np, "ufs-hi3660-broken-clk-gate-bypass", NULL))
+ host->caps |= BROKEN_CLK_GATE_BYPASS;
+
+ host->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(host->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, host->reset_gpio,
+ GPIOF_DIR_OUT, "hi3660_ufs_reset");
+ if (ret < 0)
+ dev_err(dev, "could not acquire gpio (err=%d)\n", ret);
+ }
+}
+
+/**
+ * ufs_hi3660_init
+ * @hba: host controller instance
+ */
+static int ufs_hi3660_init(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_hi3660_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_err(dev, "%s: no memory for hi3660 ufs host\n", __func__);
+ return -ENOMEM;
+ }
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
+ host->assert = devm_reset_control_get(dev, "assert");
+
+ ufs_hi3660_set_pm_lvl(hba);
+
+ ufs_hi3660_populate_dt(dev, host);
+
+ err = ufs_hi3660_get_resource(host);
+ if (err) {
+ ufshcd_set_variant(hba, NULL);
+ return err;
+ }
+
+ ufs_hi3660_clk_init(hba);
+
+ ufs_hi3660_soc_init(hba);
+
+ return 0;
+}
+
+static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
+ .name = "hi3660",
+ .init = ufs_hi3660_init,
+ .link_startup_notify = ufs_hi3660_link_startup_notify,
+ .pwr_change_notify = ufs_hi3660_pwr_change_notify,
+ .suspend = ufs_hi3660_suspend,
+ .resume = ufs_hi3660_resume,
+};
+
+static int ufs_hi3660_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_hba_hi3660_vops);
+}
+
+static int ufs_hi3660_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_hi3660_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs" },
+ {},
+};
+
+static const struct dev_pm_ops ufs_hi3660_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_hi3660_pltform = {
+ .probe = ufs_hi3660_probe,
+ .remove = ufs_hi3660_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-hi3660",
+ .pm = &ufs_hi3660_pm_ops,
+ .of_match_table = of_match_ptr(ufs_hi3660_of_match),
+ },
+};
+module_platform_driver(ufs_hi3660_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ufshcd-hi3660");
+MODULE_DESCRIPTION("HiSilicon Hi3660 UFS Driver");
diff --git a/drivers/scsi/ufs/ufs-hi3660.h b/drivers/scsi/ufs/ufs-hi3660.h
new file mode 100644
index 000000000000..04775cd2dddf
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hi3660.h
@@ -0,0 +1,170 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_HI3660_H_
+#define UFS_HI3660_H_
+
+#define HBRN8_POLL_TOUT_MS 1000
+
+/*
+ * pericrg specific define
+ */
+#define PEREN5_OFFSET (0x050)
+#define PERRSTEN3_OFFSET (0x084)
+#define PERRSTDIS3_OFFSET (0x088)
+#define PERRSTSTAT3_OFFSET (0x08C)
+#define CLKDIV16_OFFSET (0x0E8)
+#define CLKDIV17_OFFSET (0x0EC)
+#define CLKDIV21_OFFSET (0x0FC)
+#define UFS_ARESET UFS_BIT(7)
+#define RST_UFS UFS_BIT(12)
+
+/*
+ * ufs sysctrl specific define
+ */
+#define PSW_POWER_CTRL (0x04)
+#define PHY_ISO_EN (0x08)
+#define HC_LP_CTRL (0x0C)
+#define PHY_CLK_CTRL (0x10)
+#define PSW_CLK_CTRL (0x14)
+#define CLOCK_GATE_BYPASS (0x18)
+#define RESET_CTRL_EN (0x1C)
+#define PHY_RESET_STATUS (0x28)
+#define UFS_SYSCTRL (0x5C)
+#define UFS_DEVICE_RESET_CTRL (0x60)
+#define UFS_APB_ADDR_MASK (0x64)
+
+#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
+#define BIT_SYSCTRL_LP_PWR_GATE (1 << 0)
+#define BIT_SYSCTRL_PWR_READY (1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
+#define UFS_FREQ_CFG_CLK (0x39)
+#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
+#define BIT_STATUS_LP_RESETCOMPLETE (1 << 0)
+#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
+#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
+#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
+#define MASK_UFS_DEVICE_RESET (0x1 << 16)
+#define BIT_UFS_DEVICE_RESET (0x1)
+
+/*
+ * M-TX Configuration Attributes for hi3660
+ */
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_DISABLED 0x0
+#define TX_FSM_HIBERN8 0x1
+#define TX_FSM_SLEEP 0x2
+#define TX_FSM_STALL 0x3
+#define TX_FSM_LS_BURST 0x4
+#define TX_FSM_HS_BURST 0x5
+#define TX_FSM_LINE_CFG 0x6
+#define TX_FSM_LINE_RESET 0x7
+
+/*
+ * hi3660 UFS HC specific Registers
+ */
+enum {
+ UFS_REG_OCPTHRTL = 0xc0,
+ UFS_REG_OOCPR = 0xc4,
+
+ UFS_REG_CDACFG = 0xd0,
+ UFS_REG_CDATX1 = 0xd4,
+ UFS_REG_CDATX2 = 0xd8,
+ UFS_REG_CDARX1 = 0xdc,
+ UFS_REG_CDARX2 = 0xe0,
+ UFS_REG_CDASTA = 0xe4,
+
+ UFS_REG_LBMCFG = 0xf0,
+ UFS_REG_LBMSTA = 0xf4,
+ UFS_REG_UFSMODE = 0xf8,
+
+ UFS_REG_HCLKDIV = 0xfc,
+};
+
+#define UFS_AHIT_AUTOH8_TIMER (0x1001)
+
+/* REG UFS_REG_OCPTHRTL definition */
+#define LP_PGE UFS_BIT(16)
+#define LP_AH8_PGE UFS_BIT(17)
+
+#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
+#define UFS_HCLKDIV_FPGA_VALUE 0x28
+
+/* hi3660 UFS Unipro specific Registers */
+#define VS_ULPH8_Cntrl 0xd0af
+#define Ulp_Ulp_CtrlMode UFS_BIT(3)
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_HI3660_LIMIT_NUM_LANES_RX 2
+#define UFS_HI3660_LIMIT_NUM_LANES_TX 2
+#define UFS_HI3660_LIMIT_HSGEAR_RX UFS_HS_G1
+#define UFS_HI3660_LIMIT_HSGEAR_TX UFS_HS_G1
+#define UFS_HI3660_LIMIT_PWMGEAR_RX UFS_PWM_G1
+#define UFS_HI3660_LIMIT_PWMGEAR_TX UFS_PWM_G1
+#define UFS_HI3660_LIMIT_RX_PWR_PWM SLOWAUTO_MODE
+#define UFS_HI3660_LIMIT_TX_PWR_PWM SLOWAUTO_MODE
+#define UFS_HI3660_LIMIT_RX_PWR_HS FASTAUTO_MODE
+#define UFS_HI3660_LIMIT_TX_PWR_HS FASTAUTO_MODE
+#define UFS_HI3660_LIMIT_HS_RATE PA_HS_MODE_A
+#define UFS_HI3660_LIMIT_DESIRED_MODE FAST
+
+struct ufs_hi3660_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_sys_ctrl;
+ struct reset_control *rst;
+ struct reset_control *assert;
+ uint64_t caps;
+#define hi3660_CAP_RESERVED UFS_BIT(0)
+#define USE_SNPS_MPHY_TC UFS_BIT(1)
+#define USE_FPGA_BOARD_CLK UFS_BIT(2)
+#define USE_RATE_B UFS_BIT(3)
+#define BROKEN_FASTAUTO UFS_BIT(4)
+#define USE_ONE_LANE UFS_BIT(5)
+#define USE_HS_GEAR3 UFS_BIT(6)
+#define USE_HS_GEAR2 UFS_BIT(7)
+#define USE_HS_GEAR1 UFS_BIT(8)
+#define USE_AUTO_H8 UFS_BIT(9)
+#define BROKEN_CLK_GATE_BYPASS UFS_BIT(10)
+
+ int avail_ln_rx;
+ int avail_ln_tx;
+
+ u32 busthrtl_backup;
+ int reset_gpio;
+
+ bool in_suspend;
+
+ struct ufs_pa_layer_attr dev_req_params;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+ writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel( \
+ (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel((host), \
+ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+ (reg))
+#endif /* UFS_HI3660_H_ */
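The ufs_sys_ctrl_set_bits()/_clr_bits() helpers at the end of the header are plain read-modify-write accessors on the UFS system-controller block. An equivalent open-coded form, for illustration only (assumes ufs-hi3660.h and <linux/io.h>; the function name is hypothetical):

#include <linux/io.h>

static inline void example_enable_ref_clock(struct ufs_hi3660_host *host)
{
	u32 reg;

	/* ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL) */
	reg = readl(host->ufs_sys_ctrl + PHY_CLK_CTRL);
	reg |= BIT_SYSCTRL_REF_CLOCK_EN;	/* bit 24 */
	writel(reg, host->ufs_sys_ctrl + PHY_CLK_CTRL);
}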
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..ee6b2ba063da 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -48,6 +48,7 @@ enum {
REG_UFS_VERSION = 0x08,
REG_CONTROLLER_DEV_ID = 0x10,
REG_CONTROLLER_PROD_ID = 0x14,
+ REG_CONTROLLER_AHIT = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
REG_CONTROLLER_STATUS = 0x30,
@@ -141,6 +142,9 @@ enum {
CONTROLLER_FATAL_ERROR |\
SYSTEM_BUS_FATAL_ERROR)
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFS_AHIT_AH8ITV_MASK 0x3FF
+
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT UFS_BIT(0)
#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 58a7b3504b82..62c25a4d2dd8 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -106,4 +106,6 @@ source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
+source "drivers/staging/nanohub/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2fa9745db614..adc66bda8f46 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
+obj-$(CONFIG_NANOHUB) += nanohub/
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c8fb4134c55d..ad9d96a5087f 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -33,22 +33,16 @@ config ION_TEGRA
help
Choose this option if you wish to use ion on an nVidia Tegra.
-config ION_HISI
- tristate "Ion for Hisilicon"
- depends on ARCH_HISI && ION
- select ION_OF
+config ION_POOL_CACHE_POLICY
+ bool "Ion set page pool cache policy"
+ depends on ION && X86
+ default y if X86
help
- Choose this option if you wish to use ion on Hisilicon Platform.
-
-source "drivers/staging/android/ion/hisilicon/Kconfig"
+ Choose this option if you need to explicitly set the cache policy
+ of the pages in the page pool.
-config ION_OF
- bool "Devicetree support for Ion"
- depends on ION && OF_ADDRESS
+config ION_HISI
+ tristate "Hisilicon ION driver"
+ depends on ARCH_HISI && ION
help
- Provides base support for defining Ion heaps in devicetree
- and setting them up. Also includes functions for platforms
- to parse the devicetree and expand for their own custom
- extensions
-
- If using Ion and devicetree, you should say Y here
+ Choose this option if you wish to use ion on a HiSilicon target.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5d630a088381..b5b606e347ff 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
- ion_page_pool.o ion_system_heap.o \
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
@@ -8,6 +7,7 @@ endif
obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_HISI) += hisilicon/
-obj-$(CONFIG_ION_OF) += ion_of.o
-
+obj-$(CONFIG_ION_HISI) += hisi/
+obj-$(CONFIG_ION_HISI_CPUDRAW) += ion_cpudraw_heap.o
+obj-$(CONFIG_ION_HISI_SECCM) += ion_seccm_heap.o
+obj-$(CONFIG_ION_HISI_DMA_POOL) += ion_dma_pool_heap.o
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index 9a978d21785e..4911df914a78 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -39,12 +39,37 @@ struct compat_ion_handle_data {
compat_int_t handle;
};
+#ifdef CONFIG_HISI_IOMMU
+struct compat_iommu_map_format {
+ compat_ulong_t iova_start;
+ compat_ulong_t iova_size;
+ compat_ulong_t iommu_ptb_base;
+ compat_ulong_t iommu_iova_base;
+ compat_ulong_t header_size;
+ compat_ulong_t phys_page_line;
+ compat_ulong_t virt_page_line;
+ compat_ulong_t is_tile;
+ compat_ulong_t prot;
+};
+
+struct compat_ion_map_iommu_data {
+ compat_int_t handle;
+ struct compat_iommu_map_format format;
+};
+#endif
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
+#ifdef CONFIG_HISI_IOMMU
+#define COMPAT_ION_IOC_MAP_IOMMU _IOWR(ION_IOC_MAGIC, 8, \
+ struct compat_ion_map_iommu_data)
+#define COMPAT_ION_IOC_UNMAP_IOMMU _IOWR(ION_IOC_MAGIC, 9, \
+ struct compat_ion_map_iommu_data)
+#endif
static int compat_get_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
@@ -121,6 +146,71 @@ static int compat_get_ion_custom_data(
return err;
};
+#ifdef CONFIG_HISI_IOMMU
+static int compat_put_ion_map_iommu_data(
+ struct compat_ion_map_iommu_data __user *data32,
+ struct ion_map_iommu_data __user *data)
+{
+ compat_ulong_t l;
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data->handle);
+ err |= put_user(i, &data32->handle);
+ err |= get_user(l, &data->format.iova_start);
+ err |= put_user(l, &data32->format.iova_start);
+ err |= get_user(l, &data->format.iova_size);
+ err |= put_user(l, &data32->format.iova_size);
+ err |= get_user(l, &data->format.iommu_ptb_base);
+ err |= put_user(l, &data32->format.iommu_ptb_base);
+ err |= get_user(l, &data->format.iommu_iova_base);
+ err |= put_user(l, &data32->format.iommu_iova_base);
+ err |= get_user(l, &data->format.header_size);
+ err |= put_user(l, &data32->format.header_size);
+ err |= get_user(l, &data->format.phys_page_line);
+ err |= put_user(l, &data32->format.phys_page_line);
+ err |= get_user(l, &data->format.virt_page_line);
+ err |= put_user(l, &data32->format.virt_page_line);
+ err |= get_user(l, &data->format.is_tile);
+ err |= put_user(l, &data32->format.is_tile);
+ err |= get_user(l, &data->format.prot);
+ err |= put_user(l, &data32->format.prot);
+
+ return err;
+}
+
+static int compat_get_ion_map_iommu_data(
+ struct compat_ion_map_iommu_data __user *data32,
+ struct ion_map_iommu_data __user *data)
+{
+ compat_ulong_t l;
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+ err |= get_user(l, &data32->format.iova_start);
+ err |= put_user(l, &data->format.iova_start);
+ err |= get_user(l, &data32->format.iova_size);
+ err |= put_user(l, &data->format.iova_size);
+ err |= get_user(l, &data32->format.iommu_ptb_base);
+ err |= put_user(l, &data->format.iommu_ptb_base);
+ err |= get_user(l, &data32->format.iommu_iova_base);
+ err |= put_user(l, &data->format.iommu_iova_base);
+ err |= get_user(l, &data->format.header_size);
+ err |= put_user(l, &data32->format.header_size);
+ err |= get_user(l, &data32->format.phys_page_line);
+ err |= put_user(l, &data->format.phys_page_line);
+ err |= get_user(l, &data32->format.virt_page_line);
+ err |= put_user(l, &data->format.virt_page_line);
+ err |= get_user(l, &data32->format.is_tile);
+ err |= put_user(l, &data->format.is_tile);
+ err |= get_user(l, &data32->format.prot);
+ err |= put_user(l, &data->format.prot);
+ return err;
+}
+#endif
+
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
long ret;
@@ -137,7 +227,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
- if (!data)
+ if (data == NULL)
return -EFAULT;
err = compat_get_ion_allocation_data(data32, data);
@@ -156,7 +246,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
- if (!data)
+ if (data == NULL)
return -EFAULT;
err = compat_get_ion_handle_data(data32, data);
@@ -173,7 +263,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
- if (!data)
+ if (data == NULL)
return -EFAULT;
err = compat_get_ion_custom_data(data32, data);
@@ -183,10 +273,51 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
(unsigned long)data);
}
+#ifdef CONFIG_HISI_IOMMU
+ case COMPAT_ION_IOC_MAP_IOMMU:
+ {
+ struct compat_ion_map_iommu_data __user *data32;
+ struct ion_map_iommu_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_map_iommu_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_MAP_IOMMU,
+ (unsigned long)data);
+ err = compat_put_ion_map_iommu_data(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_ION_IOC_UNMAP_IOMMU:
+ {
+ struct compat_ion_map_iommu_data __user *data32;
+ struct ion_map_iommu_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_map_iommu_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_UNMAP_IOMMU,
+ (unsigned long)data);
+ err = compat_put_ion_map_iommu_data(data32, data);
+ return ret ? ret : err;
+ }
+#endif
case ION_IOC_SHARE:
case ION_IOC_MAP:
case ION_IOC_IMPORT:
case ION_IOC_SYNC:
+ case ION_IOC_INV:
return filp->f_op->unlocked_ioctl(filp, cmd,
(unsigned long)compat_ptr(arg));
default:
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
deleted file mode 100644
index 168715271f06..000000000000
--- a/drivers/staging/android/ion/devicetree.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-Ion Memory Manager
-
-Ion is a memory manager that allows for sharing of buffers via dma-buf.
-Ion allows for different types of allocation via an abstraction called
-a 'heap'. A heap represents a specific type of memory. Each heap has
-a different type. There can be multiple instances of the same heap
-type.
-
-Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
-in the devicetree.
-
-Required properties for Ion
-
-- compatible: "linux,ion" PLUS a compatible property for the device
-
-All child nodes of a linux,ion node are interpreted as heaps
-
-required properties for heaps
-
-- compatible: compatible string for a heap type PLUS a compatible property
-for the specific instance of the heap. Current heap types
--- linux,ion-heap-system
--- linux,ion-heap-system-contig
--- linux,ion-heap-carveout
--- linux,ion-heap-chunk
--- linux,ion-heap-dma
--- linux,ion-heap-custom
-
-Optional properties
-- memory-region: A phandle to a memory region. Required for DMA heap type
-(see reserved-memory.txt for details on the reservation)
-
-Example:
-
- ion {
- compatbile = "hisilicon,ion", "linux,ion";
-
- ion-system-heap {
- compatbile = "hisilicon,system-heap", "linux,ion-heap-system"
- };
-
- ion-camera-region {
- compatible = "hisilicon,camera-heap", "linux,ion-heap-dma"
- memory-region = <&camera_region>;
- };
-
- ion-fb-region {
- compatbile = "hisilicon,fb-heap", "linux,ion-heap-dma"
- memory-region = <&fb_region>;
- };
- }
diff --git a/drivers/staging/android/ion/hisi/Makefile b/drivers/staging/android/ion/hisi/Makefile
new file mode 100644
index 000000000000..7d8b6d381587
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -I$(srctree)/drivers/staging/android/ion
+ccflags-y += -I$(srctree)/drivers/hisi/ap/platform/$(TARGET_PRODUCT)
+
+obj-$(CONFIG_HISI_SMARTPOOL_OPT) += hisi_ion_smart_pool.o
+obj-y += hisi_cpudraw_alloc.o
+obj-$(CONFIG_ION_HISI) += of_hisi_ion.o
+
diff --git a/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.c b/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.c
new file mode 100644
index 000000000000..d5518cb52669
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.c
@@ -0,0 +1,341 @@
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "hisi_cpudraw_alloc.h"
+
+#define KERNEL_DEBUG
+
+#if defined(KERNEL_DEBUG)
+#define ALOGD(format, arg...) pr_info("[cpubuffer] " format, ##arg)
+#define ALOGD_INFO(format, arg...) pr_info("[cpubuffer] " format, ##arg)
+#else
+#define ALOGD(format, arg...) do { } while (0)
+#define ALOGD_INFO(format, arg...) do { } while (0)
+#endif
+
+static struct list_head idlenode_list;
+static struct list_head allocatednode_list;
+static struct mutex list_lock;
+
+
+
+static int get_split_mode(mem_node_t * node, int bytew, int byteh)
+{
+ int split_mode;
+
+ if(bytew < node->cpubuf.bytew){
+ split_mode = SPLIT_MODE_W;
+ }else if(byteh <= node->cpubuf.byteh){
+ split_mode = SPLIT_MODE_H;
+ }else{
+ split_mode = SPLIT_MODE_NULL;
+ }
+
+ ALOGD_INFO("cpubuffer info:split_mode=%d\n", split_mode);
+ return split_mode;
+}
+
+void memlist_dump(void)
+{
+ mem_node_t *node;
+
+ mutex_lock(&list_lock);
+ if(list_empty(&idlenode_list)){
+ ALOGD("DUMP_IDLE:idlenode_list is empty\n");
+ }
+
+ if(list_empty(&allocatednode_list)){
+ ALOGD("DUMP_BUSY:allocatednode_list is empty\n");
+ }
+
+ list_for_each_entry(node, &idlenode_list, node_head){
+ ALOGD("DUMP_IDLE:phy=0x%8lx w=%6d h=%6d stride=%6d size=%6d ref=%d this=0x%p father=0x%p\n",
+ node->cpubuf.phy_addr,
+ node->cpubuf.bytew,
+ node->cpubuf.byteh,
+ node->cpubuf.stride,
+ node->cpubuf.size,
+ node->ref,
+ node->thisnode,
+ node->father
+ );
+ }
+
+ list_for_each_entry(node, &allocatednode_list, node_head){
+ ALOGD("DUMP_BUSY:phy=0x%8lx w=%6d h=%6d stride=%6d size=%6d ref=%d this=0x%p father=0x%p\n",
+ node->cpubuf.phy_addr,
+ node->cpubuf.bytew,
+ node->cpubuf.byteh,
+ node->cpubuf.stride,
+ node->cpubuf.size,
+ node->ref,
+ node->thisnode,
+ node->father
+ );
+ }
+
+ mutex_unlock(&list_lock);
+}
+
+
+void memlist_init(void)
+{
+ INIT_LIST_HEAD(&idlenode_list);
+ INIT_LIST_HEAD(&allocatednode_list);
+ mutex_init(&list_lock);
+}
+
+
+static mem_node_t *split_node(mem_node_t *father, int bytew, int byteh)
+{
+ int split_mode;
+ mem_node_t *node[2];
+ int newbytew[2];
+ int newbyteh[2];
+ unsigned long newaddr[2];
+ int i;
+
+ if(father->cpubuf.bytew < bytew || father->cpubuf.byteh < byteh){
+ ALOGD_INFO("cpubuffer info:idle memory not enough left=(w=%d h=%d) need=(w=%d h=%d)\n",
+ father->cpubuf.bytew, father->cpubuf.byteh, bytew, byteh);
+ return NULL;
+ }
+
+ split_mode = get_split_mode(father, bytew, byteh);
+
+ if(split_mode != SPLIT_MODE_NULL){
+ if(split_mode == SPLIT_MODE_W){
+ newbytew[0] = bytew;
+ newbytew[1] = father->cpubuf.bytew - bytew;
+ newaddr[0] = father->cpubuf.phy_addr;
+ newaddr[1] = father->cpubuf.phy_addr + bytew;
+ newbyteh[0] = father->cpubuf.byteh;
+ newbyteh[1] = father->cpubuf.byteh;
+ }else{
+ newbyteh[0] = byteh;
+ newbyteh[1] = father->cpubuf.byteh - byteh;
+ newaddr[0] = father->cpubuf.phy_addr;
+ newaddr[1] = father->cpubuf.phy_addr + father->cpubuf.stride*byteh;
+ newbytew[0] = father->cpubuf.bytew;
+ newbytew[1] = father->cpubuf.bytew;
+ }
+
+ for(i = 0; i < 2; i++){
+ node[i] = kmalloc(sizeof(mem_node_t), GFP_KERNEL);
+ if(node[i] == NULL){
+ if(i == 1)
+ kfree(node[0]);
+ return NULL;
+ }
+
+ node[i]->father = father;
+ node[i]->thisnode = node[i];
+ node[i]->ref = father->ref + 1;
+ node[i]->cpubuf.phy_addr = newaddr[i];
+ node[i]->cpubuf.bytew = newbytew[i];
+ node[i]->cpubuf.byteh = newbyteh[i];
+ node[i]->cpubuf.stride = father->cpubuf.stride;
+ node[i]->cpubuf.size = newbytew[i]*newbyteh[i];
+ }
+
+ list_del(&father->node_head);
+ list_add(&node[1]->node_head, &idlenode_list);
+ return node[0];
+ }
+
+ return NULL;
+}
+
+static mem_node_t *node_alloc_direct(struct gen_pool *pool, int size, int stride, int byteh)
+{
+ unsigned long phy_addr = 0;
+ mem_node_t *newnode = NULL;
+
+ newnode = kmalloc(sizeof(mem_node_t), GFP_KERNEL);
+ if(newnode == NULL) {
+ return NULL;
+ }
+
+ phy_addr = gen_pool_alloc(pool, size);
+ if(phy_addr == 0){
+ kfree(newnode);
+ ALOGD("cpubuffer err:no more memory, need size=%d\n", size);
+ return NULL;
+ }
+
+ ALOGD_INFO("MEMORY info:direct alloc node=0x%p size=%d\n", newnode, size);
+
+ newnode->thisnode = newnode;
+ newnode->father = NULL;
+ newnode->ref = 0;
+ newnode->cpubuf.phy_addr = phy_addr;
+ newnode->cpubuf.stride = stride;
+ newnode->cpubuf.bytew = stride;
+ newnode->cpubuf.byteh = byteh;
+ newnode->cpubuf.size = size;
+
+ return newnode;
+}
+
+static mem_node_t *node_alloc(struct gen_pool *pool, int size, int bytew, int byteh)
+{
+ int stride = size / byteh;
+ mem_node_t *node = NULL, *newnode = NULL;
+
+ if(size < bytew*byteh || stride < bytew){
+ ALOGD("cpubuffer err:alloc parameter err size=%d w=%d h=%d\n", size, bytew, byteh);
+ return NULL;
+ }
+
+ do{
+ list_for_each_entry(node, &idlenode_list, node_head){
+ if(stride == node->cpubuf.stride){
+ newnode = split_node(node, bytew, byteh);
+ if(newnode != NULL){
+ return newnode;
+ }
+ }
+ }
+
+ newnode = node_alloc_direct(pool, size, stride, byteh);
+ if(newnode != NULL){
+ list_add(&newnode->node_head, &idlenode_list);
+ }
+ } while(newnode != NULL);
+
+ return NULL;
+}
+
+static mem_node_t *merge_node(mem_node_t *twins, mem_node_t *freenode)
+{
+ mem_node_t *father;
+
+ if(twins->father != freenode->father){
+ ALOGD("cpubuffer err:merge node but different father\n");
+ return NULL;
+ }
+
+ father = freenode->father;
+ list_del(&twins->node_head);
+
+ twins->cpubuf.thisnode = NULL;
+ freenode->cpubuf.thisnode = NULL;
+ kfree(twins);
+ kfree(freenode);
+ return father;
+}
+
+
+static void node_free_direct(struct gen_pool *pool, mem_node_t * tofreenode)
+{
+ mem_node_t *father = tofreenode;
+
+ if(father == NULL){
+ return;
+ }
+
+ pr_info("father->ref=%d\n", father->ref);
+
+ if(father != NULL && father->ref == 0){
+ pr_info("MEMORY info:direct free node=0x%p size=%d\n", father, father->cpubuf.size);
+ father->cpubuf.thisnode = NULL;
+ gen_pool_free(pool, father->cpubuf.phy_addr, father->cpubuf.size);
+ kfree(father);
+ }
+}
+
+static void node_free(struct gen_pool *pool, mem_node_t * tofreenode)
+{
+ mem_node_t *freenode = tofreenode;
+ mem_node_t *father = NULL;
+
+ if(tofreenode == NULL){
+ return;
+ }
+
+ do{
+ mem_node_t *twins = NULL;
+ mem_node_t *node = NULL;
+
+ list_for_each_entry(node, &idlenode_list, node_head){
+ if(node->father == freenode->father){
+ twins = node;
+ }
+ }
+
+ if(twins == NULL){
+ list_add(&freenode->node_head, &idlenode_list);
+ father = NULL;
+ }else{
+ int ref = twins->ref;
+
+ father = merge_node(twins, freenode);
+ if(father == NULL)
+ break;
+ freenode = father;
+ ALOGD_INFO("cpubuffer info:merge this_ref=%d father_ref=%d father=0x%p\n", ref, father->ref, father);
+ }
+ }while(father != NULL && father->ref != 0);
+
+ node_free_direct(pool, father);
+}
+
+unsigned long cpubuffer_alloc(struct gen_pool *pool, int size, int bytew, int byteh)
+{
+ mem_node_t *node = NULL;
+ cpubuf_handle_t * cpubuf = NULL;
+
+ mutex_lock(&list_lock);
+ node = node_alloc(pool, size , bytew, byteh);
+
+ if(node != NULL){
+ node->cpubuf.thisnode = (void *)node->thisnode;
+ cpubuf = &node->cpubuf;
+ list_add(&node->node_head, &allocatednode_list);
+ mutex_unlock(&list_lock);
+ return cpubuf->phy_addr;
+ }
+
+ mutex_unlock(&list_lock);
+ return 0;
+}
+
+
+int cpubuffer_free(struct gen_pool *pool, unsigned long phy_addr)
+{
+ mem_node_t *node = NULL;
+ cpubuf_handle_t *cpubuf = NULL;
+
+ pr_info("free addr=0x%lx\n", phy_addr);
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(node, &allocatednode_list, node_head){
+ pr_info("free node_phy_addr=0x%lx addr=0x%lx\n", node->cpubuf.phy_addr, phy_addr);
+ if(node->cpubuf.phy_addr == phy_addr){
+ cpubuf = &node->cpubuf;
+ break;
+ }
+ }
+
+ /* bail out before touching the list if the address was never allocated */
+ if(cpubuf == NULL){
+ mutex_unlock(&list_lock);
+ pr_info("free: addr=0x%lx not found in allocated list\n", phy_addr);
+ return -1;
+ }
+
+ if(node != cpubuf->thisnode){
+ mutex_unlock(&list_lock);
+ pr_info("free: addr=0x%lx does not match its node handle\n", phy_addr);
+ return -1;
+ }
+
+ /* unlink under the same lock and keep it held for the free path below */
+ list_del(&node->node_head);
+
+ if(cpubuf->stride == 0 && cpubuf->bytew == 0 && cpubuf->byteh == 0){
+ node_free_direct(pool, node);
+ }else{
+ node_free(pool, node);
+ }
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
diff --git a/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.h b/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.h
new file mode 100644
index 000000000000..f79284a57c40
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/hisi_cpudraw_alloc.h
@@ -0,0 +1,38 @@
+#ifndef _CPUBUFFER_ALLOC_H_
+#define _CPUBUFFER_ALLOC_H_
+
+#include <linux/genalloc.h>
+#include <linux/list.h>
+
+enum{
+ SPLIT_MODE_NULL = 0x0,
+ SPLIT_MODE_W,
+ SPLIT_MODE_H
+};
+
+typedef struct cpubuf_handle_t{
+ unsigned long phy_addr;
+ int bytew; // 256 bytes aligned
+ int byteh; // 16 pixel aligned
+ int stride;
+
+ int size; //only for ref = 0 node
+ void *thisnode; //only for free
+}cpubuf_handle_t;
+
+typedef struct mem_node_t{
+ struct list_head node_head;
+ struct mem_node_t *thisnode;
+ struct mem_node_t *father;
+ unsigned int ref;
+
+ cpubuf_handle_t cpubuf;
+}mem_node_t;
+
+void memlist_init(void);
+void memlist_dump(void);
+unsigned long cpubuffer_alloc(struct gen_pool *pool, int size, int bytew, int byteh);
+int cpubuffer_free(struct gen_pool *pool, unsigned long phy_addr);
+
+
+#endif //_CPUBUFFER_ALLOC_H_
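For illustration only, a minimal sketch of how the allocator API declared above might be driven; the gen_pool is assumed to be created and populated by the caller, and the demo function name is hypothetical:

    #include <linux/errno.h>
    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    #include "hisi_cpudraw_alloc.h"

    /* carve a 256-byte-wide, 16-line sub-buffer out of a caller-owned pool */
    static int cpudraw_alloc_demo(struct gen_pool *pool)
    {
            unsigned long phys;

            memlist_init();                         /* set up the idle/allocated lists once */

            phys = cpubuffer_alloc(pool, SZ_4K, 256, 16);
            if (!phys)
                    return -ENOMEM;                 /* pool exhausted or bad geometry */

            memlist_dump();                         /* optional: log both lists for debugging */

            return cpubuffer_free(pool, phys);      /* 0 on success, -1 if the address is unknown */
    }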
diff --git a/drivers/staging/android/ion/hisi/hisi_ion_dump.c b/drivers/staging/android/ion/hisi/hisi_ion_dump.c
new file mode 100644
index 000000000000..18067b68161a
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/hisi_ion_dump.c
@@ -0,0 +1,69 @@
+static size_t ion_client_total(struct ion_client *client)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle, node);
+ if (!(handle->import) && (handle->buffer->heap->type !=
+ ION_HEAP_TYPE_CARVEOUT)) {
+ if (handle->buffer->cpudraw_sg_table)
+ size += handle->buffer->cpu_buffer_size;
+ else
+ size += handle->buffer->size;
+ }
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+unsigned long hisi_ion_total(void)
+{
+ return (unsigned long)atomic_long_read(&ion_total_size);
+}
+
+int hisi_ion_memory_info(bool verbose)
+{
+ struct rb_node *n;
+ struct ion_device *dev = get_ion_device();
+
+ if (!dev)
+ return -1;
+ pr_info("ion total size:%ld\n", atomic_long_read(&ion_total_size));
+ if (!verbose)
+ return 0;
+ down_read(&dev->client_lock);
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n,
+ struct ion_client, node);
+ size_t size = ion_client_total(client);
+
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ pr_info("%16.s %16u %16zu\n",
+ task_comm, client->pid, size);
+ } else {
+ pr_info("%16.s %16u %16zu\n",
+ client->name, client->pid, size);
+ }
+ }
+ up_read(&dev->client_lock);
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+
+ if (!buffer->handle_count &&
+ (buffer->heap->type != ION_HEAP_TYPE_CARVEOUT))
+ pr_info("%16.s %16u %16zu\n", buffer->task_comm,
+ buffer->pid, buffer->size);
+ }
+ mutex_unlock(&dev->buffer_lock);
+ return 0;
+}
diff --git a/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.c b/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.c
new file mode 100644
index 000000000000..1217c57bde6f
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.c
@@ -0,0 +1,346 @@
+
+/*
+ *
+ * Copyright (C) 2016 hisilicon, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "smartpool: " fmt
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/hisi/ion-iommu.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+
+#include "ion.h"
+#include "hisi_ion_smart_pool.h"
+
+/*for pclin*/
+/*lint -save -e846 -e514 -e866 -e30 -e84 -e712 -e701 -e40 -e578 -e528*/
+/*lint -save -e522 -e838 -e737 -e84 -e774 -e845 -e527 -e531 -e702 -e753*/
+/*lint -save -e713 -e732 -e438 -e778 -e708 -e21 -e528 -e756*/
+
+static bool smart_pool_enable = true;
+static int smart_pool_alloc_size;
+
+struct task_struct *smart_pool_thread;
+static wait_queue_head_t smart_pool_wait;
+static unsigned int smart_pool_wait_flag;
+
+int smart_pool_water_mark = 24 * 64 * 4;
+
+static unsigned int smart_pool_orders[] = {9, 8, 4, 2, 0};
+
+static const int smart_pool_num_orders = ARRAY_SIZE(smart_pool_orders);
+
+#define SMART_POOL_MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+bool ion_smart_is_graphic_buffer(struct ion_buffer *buffer)
+{
+ if (NULL == buffer) {
+ pr_err("%s: buffer is NULL!\n", __func__);
+ return false;
+ }
+ return !!(buffer->flags & ION_FLAG_GRAPHIC_BUFFER);
+}
+
+void ion_smart_set_water_mark(int water_mark)
+{
+ smart_pool_water_mark = water_mark;
+}
+
+static int sp_order_to_index(unsigned int order)
+{
+ int i;
+
+ for (i = 0; i < smart_pool_num_orders; i++) {
+ if (order == smart_pool_orders[i])
+ return i;
+ }
+
+ BUG();
+ return -1;
+}
+
+static inline unsigned long sp_order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+static int sp_ion_page_pool_total(struct ion_page_pool *pool)
+{
+ int count;
+
+ if (NULL == pool) {
+ pr_err("%s: pool is NULL!\n", __func__);
+ return 0;
+ }
+
+ count = pool->low_count + pool->high_count;
+
+ return count << pool->order;
+}
+
+static int sp_pool_total_pages(struct ion_smart_pool *pool)
+{
+ int i;
+ int count = 0;
+
+ if (NULL == pool) {
+ pr_err("%s: pool is NULL!\n", __func__);
+ return 0;
+ }
+
+ for (i = 0; i < smart_pool_num_orders; i++)
+ count += sp_ion_page_pool_total(pool->pools[i]);
+
+ return count;
+}
+
+void ion_smart_sp_init_page(struct page *page)
+{
+ unsigned long len;
+
+ if (NULL == page) {
+ pr_err("%s: page is NULL!\n", __func__);
+ return;
+ }
+ len = PAGE_SIZE << compound_order(page);
+ memset(page_address(page), 0, len);
+ __flush_dcache_area(page_address(page), len);
+}
+
+static int sp_fill_pool_once(struct ion_page_pool *pool)
+{
+ struct page *page;
+
+ if (NULL == pool) {
+ pr_err("%s: pool is NULL!\n", __func__);
+ return -ENOENT;
+ }
+ page = ion_page_pool_alloc_pages(pool);
+ if (NULL == page)
+ return -ENOMEM;
+ ion_smart_sp_init_page(page);
+ ion_page_pool_free(pool, page);
+
+ return 0;
+}
+
+static int ion_smart_pool_kworkthread(void *p)
+{
+ int i;
+ struct ion_smart_pool *pool;
+ int ret;
+
+ if (NULL == p) {
+ pr_err("%s: p is NULL!\n", __func__);
+ return 0;
+ }
+
+ pool = (struct ion_smart_pool *)p;
+ while (true) {
+ ret = wait_event_interruptible(smart_pool_wait,
+ (smart_pool_wait_flag == 1));
+ if (ret < 0)
+ continue;
+
+ smart_pool_wait_flag = 0;
+ for (i = 0; i < smart_pool_num_orders; i++) {
+ while (sp_pool_total_pages(pool) <
+ smart_pool_water_mark) {
+ if (sp_fill_pool_once(pool->pools[i]) < 0)
+ break;
+ }
+ }
+
+ for (i = 2; i < smart_pool_num_orders; i++) {
+ while (sp_ion_page_pool_total(pool->pools[i]) <
+ LOWORDER_WATER_MASK) {
+ if (sp_fill_pool_once(pool->pools[i]) < 0)
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+struct page *ion_smart_pool_allocate(struct ion_smart_pool *pool,
+ unsigned long size, unsigned int max_order)
+{
+ int i;
+ struct page *page;
+
+ if (NULL == pool) {
+ pr_err("%s: pool is NULL!\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < smart_pool_num_orders; i++) {
+ if (size < sp_order_to_size(smart_pool_orders[i]))
+ continue;
+ if (max_order < smart_pool_orders[i])
+ continue;
+
+ page = ion_page_pool_alloc(pool->pools[i]);
+ if (!page)
+ continue;
+ if (smart_pool_alloc_size) {
+ smart_pool_alloc_size +=
+ PAGE_SIZE << compound_order(page);
+ }
+ return page;
+ }
+
+ return NULL;
+}
+
+void ion_smart_pool_wakeup_process(void)
+{
+ if (!smart_pool_enable)
+ return;
+ smart_pool_wait_flag = 1;
+ wake_up_interruptible(&smart_pool_wait);
+}
+
+void ion_smart_pool_all_free(struct ion_smart_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int i;
+
+ if (NULL == pool) {
+ pr_err("%s: smart_pool is NULL!\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < smart_pool_num_orders; i++)
+ ion_page_pool_shrink(pool->pools[i], gfp_mask, nr_to_scan);
+}
+
+int ion_smart_pool_free(struct ion_smart_pool *pool, struct page *page)
+{
+ int order;
+
+ if (!smart_pool_enable) {
+ ion_smart_pool_all_free(pool, __GFP_HIGHMEM, MAX_POOL_SIZE);
+ return -1;
+ }
+ if ((NULL == pool) || (NULL == page)) {
+ pr_err("%s: pool/page is NULL!\n", __func__);
+ return -1;
+ }
+
+ order = compound_order(page);
+
+ if (sp_pool_total_pages(pool) < MAX_POOL_SIZE) {
+ ion_smart_sp_init_page(page);
+ ion_page_pool_free(pool->pools[sp_order_to_index(order)], page);
+ return 0;
+ }
+
+ return -1;
+}
+
+int ion_smart_pool_shrink(struct ion_smart_pool *smart_pool,
+ struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int nr_max_free;
+ int nr_to_free;
+ int nr_total = 0;
+
+ if ((NULL == smart_pool) || (NULL == pool)) {
+ pr_err("%s: smartpool/pool is NULL!\n", __func__);
+ return 0;
+ }
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_shrink(pool, gfp_mask, 0);
+
+ nr_max_free = sp_pool_total_pages(smart_pool) -
+ (smart_pool_water_mark + LOWORDER_WATER_MASK);
+ nr_to_free = SMART_POOL_MIN(nr_max_free, nr_to_scan);
+
+ if (nr_to_free <= 0)
+ return 0;
+
+ nr_total = ion_page_pool_shrink(pool, gfp_mask, nr_to_free);
+ return nr_total;
+}
+
+void ion_smart_pool_debug_show_total(struct seq_file *s,
+ struct ion_smart_pool *smart_pool)
+{
+ if ((NULL == s) || (NULL == smart_pool)) {
+ pr_err("%s: s/smart_pool is NULL!\n", __func__);
+ return;
+ }
+ seq_puts(s, "----------------------------------------------------\n");
+ seq_printf(s, "in smart pool = %d total\n",
+ sp_pool_total_pages(smart_pool) * 4 / 1024);
+}
+
+struct ion_smart_pool *ion_smart_pool_create(void)
+{
+ struct ion_smart_pool *smart_pool =
+ kzalloc(sizeof(struct ion_smart_pool) +
+ sizeof(struct ion_page_pool *) * smart_pool_num_orders,
+ GFP_KERNEL);
+ bool graphic_buffer_flag = true;
+
+ if (NULL == smart_pool) {
+ pr_err("%s: smart_pool is NULL!\n", __func__);
+ return NULL;
+ }
+
+ if (ion_system_heap_create_pools(smart_pool->pools,
+ graphic_buffer_flag))
+ goto free_heap;
+
+ init_waitqueue_head(&smart_pool_wait);
+ smart_pool_thread = kthread_run(ion_smart_pool_kworkthread, smart_pool,
+ "%s", "smartpool");
+ if (IS_ERR(smart_pool_thread)) {
+ pr_err("%s: kthread_create failed!\n", __func__);
+ goto destroy_pools;
+ }
+
+ ion_smart_pool_wakeup_process();
+
+ return smart_pool;
+
+destroy_pools:
+ ion_system_heap_destroy_pools(smart_pool->pools);
+
+free_heap:
+ kfree(smart_pool);
+ smart_pool = NULL;
+ return NULL;
+}
+
+module_param_named(debug_smart_pool_enable, smart_pool_enable, bool, 0644);
+MODULE_PARM_DESC(debug_smart_pool_enable, "enable smart pool");
+
+module_param_named(debug_smart_pool_alloc_size, smart_pool_alloc_size, int,
+ 0644);
+MODULE_PARM_DESC(debug_smart_pool_alloc_size, "alloc size from smartpool");
+/*lint -restore*/
diff --git a/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.h b/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.h
new file mode 100644
index 000000000000..1fac41d482af
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/hisi_ion_smart_pool.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Hisilicon, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_SMART_POOL_H
+#define _ION_SMART_POOL_H
+
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include "ion_priv.h"
+
+#define LOWORDER_WATER_MASK (64*4)
+#define MAX_POOL_SIZE (128*64*4)
+
+struct ion_smart_pool {
+ struct ion_page_pool *pools[0];
+};
+bool ion_smart_is_graphic_buffer(struct ion_buffer *buffer);
+void ion_smart_pool_debug_show_total(struct seq_file *s,
+ struct ion_smart_pool *smart_pool);
+void ion_smart_sp_init_page(struct page *page);
+struct page *ion_smart_pool_allocate(struct ion_smart_pool *pool,
+ unsigned long size,
+ unsigned int max_order);
+int ion_smart_pool_free(struct ion_smart_pool *pool, struct page *page);
+int ion_smart_pool_shrink(struct ion_smart_pool *smart_pool,
+ struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+struct ion_smart_pool *ion_smart_pool_create(void);
+void ion_smart_pool_wakeup_process(void);
+void ion_smart_set_water_mark(int water_mark);
+#endif /* _ION_SMART_POOL_H */
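As an illustration (not part of the patch), a sketch of the intended allocate/free round trip against a pool returned by ion_smart_pool_create(); the demo function is hypothetical and error handling is minimal:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>

    #include "hisi_ion_smart_pool.h"

    static int smart_pool_demo(struct ion_smart_pool *pool)
    {
            struct page *page;

            /* try to satisfy a 64 KiB request, allowing compound pages up to order 9 */
            page = ion_smart_pool_allocate(pool, SZ_64K, 9);
            if (!page)
                    return -ENOMEM; /* pool empty; a real caller falls back to alloc_pages() */

            /* ... fill the buffer ... */

            /* return it; the pool zeroes and flushes the page before handing it out again */
            if (ion_smart_pool_free(pool, page))
                    __free_pages(page, compound_order(page));       /* pool full or disabled */

            /* ask the refill thread to top the pool back up to its watermark */
            ion_smart_pool_wakeup_process();
            return 0;
    }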
diff --git a/drivers/staging/android/ion/hisi/of_hisi_ion.c b/drivers/staging/android/ion/hisi/of_hisi_ion.c
new file mode 100644
index 000000000000..b9d2c886d677
--- /dev/null
+++ b/drivers/staging/android/ion/hisi/of_hisi_ion.c
@@ -0,0 +1,413 @@
+/*
+ * Hisilicon hisi ION Driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Chen Feng <puck.chen@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "Ion: " fmt
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/hisi/hisi_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <linux/compat.h>
+#include <linux/sizes.h>
+#include <ion_priv.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_fdt.h>
+#include <asm/cputype.h>
+
+
+#define MAX_HISI_ION_DYNAMIC_AREA_NAME_LEN 64
+struct hisi_ion_dynamic_area {
+ phys_addr_t base;
+ unsigned long size;
+ char name[MAX_HISI_ION_DYNAMIC_AREA_NAME_LEN];
+};
+
+struct hisi_ion_type_table {
+ const char *name;
+ enum ion_heap_type type;
+};
+
+static const struct hisi_ion_type_table ion_type_table[] = {
+ {"ion_system", ION_HEAP_TYPE_SYSTEM},
+ {"ion_system_contig", ION_HEAP_TYPE_SYSTEM_CONTIG},
+ {"ion_carveout", ION_HEAP_TYPE_CARVEOUT},
+ {"ion_chunk", ION_HEAP_TYPE_CHUNK},
+ {"ion_dma", ION_HEAP_TYPE_DMA},
+ {"ion_custom", ION_HEAP_TYPE_CUSTOM},
+};
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+static struct ion_platform_heap **heaps_data;
+
+
+#define MAX_HISI_ION_DYNAMIC_AREA_NUM 5
+static struct hisi_ion_dynamic_area ion_dynamic_area_table[MAX_HISI_ION_DYNAMIC_AREA_NUM];
+static int ion_dynamic_area_count = 0;
+
+static int add_dynamic_area(phys_addr_t base, unsigned long len, const char* name)
+{
+ int ret = 0;
+ int i = ion_dynamic_area_count;
+
+ if (i < MAX_HISI_ION_DYNAMIC_AREA_NUM) {
+ ion_dynamic_area_table[i].base = base;
+ ion_dynamic_area_table[i].size = len;
+ strncpy(ion_dynamic_area_table[i].name, name,
+ MAX_HISI_ION_DYNAMIC_AREA_NAME_LEN-1);
+ ion_dynamic_area_table[i].name[MAX_HISI_ION_DYNAMIC_AREA_NAME_LEN-1] = '\0';
+ pr_err("insert heap-name %s \n", ion_dynamic_area_table[i].name);
+
+ ion_dynamic_area_count ++;
+
+ return ret;
+
+ }
+
+ return -EFAULT;
+}
+
+static struct hisi_ion_dynamic_area* find_dynamic_area_by_name(const char* name)
+{
+ int i = 0;
+
+ if (!name) {
+ return NULL;
+ }
+
+ for (; i < MAX_HISI_ION_DYNAMIC_AREA_NUM; i++) {
+ pr_err("name = %s, table name %s \n", name, ion_dynamic_area_table[i].name);
+ if (!strcmp(name, ion_dynamic_area_table[i].name)) {
+ return &ion_dynamic_area_table[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int __init hisi_ion_reserve_area(struct reserved_mem *rmem)
+{
+ char *status = NULL;
+ int namesize = 0;
+ const char* heapname;
+
+ status = (char *)of_get_flat_dt_prop(rmem->fdt_node, "status", NULL);
+ if (status && (strncmp(status, "ok", strlen("ok")) != 0))
+ return 0;
+
+ heapname = of_get_flat_dt_prop(rmem->fdt_node, "heap-name", &namesize);
+ if (!heapname || (namesize <= 0)) {
+ pr_err("no 'heap-name' property namesize=%d\n", namesize);
+ return -EFAULT;
+ }
+
+ pr_info("base 0x%llx, size is 0x%llx, node name %s, heap-name %s namesize %d,"
+ "[%d][%d][%d][%d]\n",
+ rmem->base, rmem->size, rmem->name, heapname, namesize,
+ heapname[0], heapname[1], heapname[2], heapname[3]);
+
+ if (add_dynamic_area(rmem->base, rmem->size, heapname)) {
+ pr_err("fail to add to dynamic area \n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(hisi_ion, "hisi_ion", hisi_ion_reserve_area);
+
+struct ion_device *get_ion_device(void)
+{
+ return idev;
+}
+
+static void ion_pm_init(void)
+{
+ return;
+}
+
+void ion_flush_all_cpus_caches(void)
+{
+ return;
+}
+
+struct ion_client *hisi_ion_client_create(const char *name)
+{
+ return ion_client_create(idev, name);
+}
+EXPORT_SYMBOL(hisi_ion_client_create);
+
+static long hisi_ion_custom_ioctl(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ return ret;
+}
+
+extern int hisi_ion_enable_iommu(struct platform_device *pdev);
+
+static int get_type_by_name(const char *name, enum ion_heap_type *type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ion_type_table); i++) {
+ if (strcmp(name, ion_type_table[i].name))
+ continue;
+
+ *type = ion_type_table[i].type;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hisi_set_platform_data(struct platform_device *pdev)
+{
+ unsigned int base = 0;
+ unsigned int size = 0;
+ unsigned int id = 0;
+ const char *heap_name;
+ const char *type_name;
+ const char *status;
+ enum ion_heap_type type = 0;
+ int ret = 0;
+ struct device_node *np;
+ struct device_node *phandle_node;
+ struct property *prop;
+ struct ion_platform_heap *p_data;
+ const struct device_node *dt_node = pdev->dev.of_node;
+ int index = 0;
+
+ for_each_child_of_node(dt_node, np)
+ num_heaps++;
+
+ heaps_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_platform_heap *) *
+ num_heaps,
+ GFP_KERNEL);
+ if (!heaps_data)
+ return -ENOMEM;
+
+ for_each_child_of_node(dt_node, np) {
+ ret = of_property_read_string(np, "status", &status);
+ if (!ret) {
+ if (strncmp("ok", status, strlen("ok")))
+ continue;
+ }
+
+ phandle_node = of_parse_phandle(np, "heap-name", 0);
+ if (phandle_node) {
+ int len;
+
+ ret = of_property_read_string(phandle_node, "status", &status);
+ if (!ret) {
+ if (strncmp("ok", status, strlen("ok")))
+ continue;
+ }
+
+ prop = of_find_property(phandle_node, "heap-name", &len);
+ if (!prop) {
+ pr_err("no heap-name in phandle of node %s\n", np->name);
+ continue;
+ }
+
+ if (!prop->value || !prop->length) {
+ pr_err("%s %s %d, node %s, invalid phandle, value=%p,length=%d\n",
+ __FILE__,__FUNCTION__,__LINE__,
+ np->name, prop->value, prop->length );
+ continue;
+ } else {
+ heap_name = prop->value;
+ }
+ } else {
+ ret = of_property_read_string(np, "heap-name", &heap_name);
+ if (ret < 0) {
+ pr_err("invalid heap-name in node %s, please check the name \n", np->name);
+ continue;
+ }
+
+ }
+
+ pr_err("node name [%s], heap-name [%s]\n", np->name, heap_name);
+
+ ret = of_property_read_u32(np, "heap-id", &id);
+ if (ret < 0) {
+ pr_err("check the id %s\n", np->name);
+ continue;
+ }
+
+ ret = of_property_read_u32(np, "heap-base", &base);
+ if (ret < 0) {
+ pr_err("check the base of node %s\n", np->name);
+ continue;
+ }
+
+ ret = of_property_read_u32(np, "heap-size", &size);
+ if (ret < 0) {
+ pr_err("check the size of node %s\n", np->name);
+ continue;
+ }
+
+ ret = of_property_read_string(np, "heap-type", &type_name);
+ if (ret < 0) {
+ pr_err("check the type of node %s\n", np->name);
+ continue;
+ }
+
+ ret = get_type_by_name(type_name, &type);
+ if (ret < 0) {
+ pr_err("type name error %s!\n", type_name);
+ continue;
+ }
+ pr_err("heap index %d : name %s base 0x%x size 0x%x id %d type %d\n",
+ index, heap_name, base, size, id, type);
+
+ p_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_platform_heap),
+ GFP_KERNEL);
+ if (!p_data)
+ return -ENOMEM;
+
+ p_data->name = heap_name;
+ p_data->base = base;
+ p_data->size = size;
+ p_data->id = id;
+ p_data->type = type;
+ p_data->priv = (void *)&pdev->dev;
+
+ if (!p_data->base && !p_data->size) {
+ struct hisi_ion_dynamic_area* area = NULL;
+ pr_err("heap %s base =0, try to find dynamic area \n", p_data->name);
+ area = find_dynamic_area_by_name(p_data->name);
+ if (area) {
+ p_data->base = area->base;
+ p_data->size = area->size;
+ pr_err("have found heap name %s base = 0x%lx, size %zu\n",
+ p_data->name,
+ p_data->base, p_data->size);
+ }
+ }
+
+ heaps_data[index] = p_data;
+ index++;
+ }
+ num_heaps = index;
+ return 0;
+}
+
+static int hisi_ion_probe(struct platform_device *pdev)
+{
+ int i;
+ int err;
+ struct ion_platform_heap *p_heap;
+
+ idev = ion_device_create(hisi_ion_custom_ioctl);
+ if (IS_ERR_OR_NULL(idev))
+ return idev ? PTR_ERR(idev) : -ENOMEM;
+ err = hisi_set_platform_data(pdev);
+ if (err) {
+ pr_err("ion set platform data error!\n");
+ goto err_free_idev;
+ }
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_heap *) * num_heaps,
+ GFP_KERNEL);
+ if (!heaps) {
+ err = -ENOMEM;
+ goto err_free_idev;
+ }
+
+ /* FIXME will move to iommu driver*/
+ if (hisi_ion_enable_iommu(pdev)) {
+ dev_info(&pdev->dev, "enable iommu fail \n");
+ err = -EINVAL;
+ goto err_free_idev;
+ }
+ ion_pm_init();
+
+ /*
+ * create the heaps as specified in the dts file
+ */
+ for (i = 0; i < num_heaps; i++) {
+ p_heap = heaps_data[i];
+
+ pr_info("id %d name %s base %lu size %lu\n",
+ i, p_heap->name, p_heap->base, p_heap->size);
+
+ heaps[i] = ion_heap_create(p_heap);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ pr_err("error add %s of type %d with %lx@%lx\n",
+ p_heap->name, p_heap->type,
+ p_heap->base, (unsigned long)p_heap->size);
+ continue;
+ }
+
+ ion_device_add_heap(idev, heaps[i]);
+
+ pr_info("adding heap %s of type %d with %lx@%lx\n",
+ p_heap->name, p_heap->type,
+ p_heap->base, (unsigned long)p_heap->size);
+ }
+ return 0;
+
+err_free_idev:
+ ion_device_destroy(idev);
+
+ return err;
+}
+
+static int hisi_ion_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < num_heaps; i++) {
+ ion_heap_destroy(heaps[i]);
+ heaps[i] = NULL;
+ }
+ ion_device_destroy(idev);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_ion_match_table[] = {
+ {.compatible = "hisilicon,hisi-ion"},
+ {},
+};
+
+static struct platform_driver hisi_ion_driver = {
+ .probe = hisi_ion_probe,
+ .remove = hisi_ion_remove,
+ .driver = {
+ .name = "ion-hisi",
+ .of_match_table = hisi_ion_match_table,
+ },
+};
+
+static int __init hisi_ion_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&hisi_ion_driver);
+ return ret;
+}
+
+subsys_initcall(hisi_ion_init);
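For illustration (not part of the patch), a consumer driver might obtain an ION client through the helper exported above; the header path and the demo names are assumptions:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/hisi/hisi_ion.h>

    static struct ion_client *demo_client;

    static int demo_attach_ion(void)
    {
            /* the name only shows up in ion's debugfs client list */
            demo_client = hisi_ion_client_create("demo-driver");
            if (IS_ERR_OR_NULL(demo_client))
                    return demo_client ? PTR_ERR(demo_client) : -ENODEV;

            return 0;
    }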
diff --git a/drivers/staging/android/ion/hisilicon/Kconfig b/drivers/staging/android/ion/hisilicon/Kconfig
deleted file mode 100644
index 2b4bd0798290..000000000000
--- a/drivers/staging/android/ion/hisilicon/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config HI6220_ION
- bool "Hi6220 ION Driver"
- depends on ARCH_HISI && ION
- help
- Build the Hisilicon Hi6220 ion driver.
diff --git a/drivers/staging/android/ion/hisilicon/Makefile b/drivers/staging/android/ion/hisilicon/Makefile
index 2a89414280ac..f123a57ce456 100644
--- a/drivers/staging/android/ion/hisilicon/Makefile
+++ b/drivers/staging/android/ion/hisilicon/Makefile
@@ -1 +1,2 @@
-obj-$(CONFIG_HI6220_ION) += hi6220_ion.o
+ccflags-y += -I$(srctree)/drivers/staging/android
+obj-y += hisi_ion.o
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
deleted file mode 100644
index 0de7897fd4bf..000000000000
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Hisilicon Hi6220 ION Driver
- *
- * Copyright (c) 2015 Hisilicon Limited.
- *
- * Author: Chen Feng <puck.chen@hisilicon.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "Ion: " fmt
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/mm.h>
-#include "../ion_priv.h"
-#include "../ion.h"
-#include "../ion_of.h"
-
-struct hisi_ion_dev {
- struct ion_heap **heaps;
- struct ion_device *idev;
- struct ion_platform_data *data;
-};
-
-static struct ion_of_heap hisi_heaps[] = {
- PLATFORM_HEAP("hisilicon,sys_user", 0,
- ION_HEAP_TYPE_SYSTEM, "sys_user"),
- PLATFORM_HEAP("hisilicon,sys_contig", 1,
- ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
- PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
- "cma"),
- {}
-};
-
-static int hi6220_ion_probe(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
- if (!ipdev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ipdev);
-
- ipdev->idev = ion_device_create(NULL);
- if (IS_ERR(ipdev->idev))
- return PTR_ERR(ipdev->idev);
-
- ipdev->data = ion_parse_dt(pdev, hisi_heaps);
- if (IS_ERR(ipdev->data))
- return PTR_ERR(ipdev->data);
-
- ipdev->heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap) * ipdev->data->nr,
- GFP_KERNEL);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
-
- for (i = 0; i < ipdev->data->nr; i++) {
- ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
- ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
- }
- return 0;
-}
-
-static int hi6220_ion_remove(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = platform_get_drvdata(pdev);
-
- for (i = 0; i < ipdev->data->nr; i++)
- ion_heap_destroy(ipdev->heaps[i]);
-
- ion_destroy_platform_data(ipdev->data);
- ion_device_destroy(ipdev->idev);
-
- return 0;
-}
-
-static const struct of_device_id hi6220_ion_match_table[] = {
- {.compatible = "hisilicon,hi6220-ion"},
- {},
-};
-
-static struct platform_driver hi6220_ion_driver = {
- .probe = hi6220_ion_probe,
- .remove = hi6220_ion_remove,
- .driver = {
- .name = "ion-hi6220",
- .of_match_table = hi6220_ion_match_table,
- },
-};
-
-static int __init hi6220_ion_init(void)
-{
- return platform_driver_register(&hi6220_ion_driver);
-}
-
-subsys_initcall(hi6220_ion_init);
diff --git a/drivers/staging/android/ion/hisilicon/hisi_ion.c b/drivers/staging/android/ion/hisilicon/hisi_ion.c
new file mode 100644
index 000000000000..05964ef7ff5f
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/hisi_ion.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "ion: " fmt
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/hisi_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "../ion_priv.h"
+
+struct hisi_ion_name_id_table {
+ const char *name;
+ unsigned int id;
+};
+
+static struct hisi_ion_name_id_table name_id_table[] = {
+ {"fb", ION_FB_HEAP_ID},
+ {"vpu", ION_VPU_HEAP_ID},
+ {"jpu", ION_JPU_HEAP_ID},
+ {"gralloc-carveout", ION_GRALLOC_HEAP_ID},
+ {"overlay", ION_OVERLAY_HEAP_ID},
+ {"sys_user", ION_SYSTEM_HEAP_ID},
+ {"sys_contig", ION_SYSTEM_CONTIG_HEAP_ID},
+ {"cma", ION_HEAP_TYPE_DMA},
+};
+
+struct hisi_ion_type_id_table {
+ const char *name;
+ enum ion_heap_type type;
+};
+
+static struct hisi_ion_type_id_table type_id_table[] = {
+ {"ion_system_contig", ION_HEAP_TYPE_SYSTEM_CONTIG},
+ {"ion_system", ION_HEAP_TYPE_SYSTEM},
+ {"ion_carveout", ION_HEAP_TYPE_CARVEOUT},
+ {"ion_chunk", ION_HEAP_TYPE_CHUNK},
+ {"ion_dma", ION_HEAP_TYPE_DMA},
+ {"ion_custom", ION_HEAP_TYPE_CUSTOM},
+ {"ion_cma", ION_HEAP_TYPE_DMA},
+};
+
+#define HISI_ION_HEAP_NUM 16
+
+static struct ion_platform_data hisi_ion_platform_data = {0};
+static struct ion_platform_heap hisi_ion_platform_heap[HISI_ION_HEAP_NUM] = {{0} };
+
+static struct ion_device *hisi_ion_device;
+static struct ion_heap *hisi_ion_heap[HISI_ION_HEAP_NUM] = {NULL};
+
+int hisi_ion_get_heap_info(unsigned int id, struct ion_heap_info_data *data)
+{
+ int i;
+
+ BUG_ON(!data);
+
+ for (i = 0; i < hisi_ion_platform_data.nr; i++) {
+ if (hisi_ion_platform_heap[i].id == id) {
+ data->heap_phy = hisi_ion_platform_heap[i].base;
+ data->heap_size = hisi_ion_platform_heap[i].size;
+ strncpy((void *)data->name, (void *)hisi_ion_platform_heap[i].name, HISI_ION_NAME_LEN);
+ pr_info("heap info : id %d name %s phy 0x%llx size %u\n",
+ id, data->name, data->heap_phy, data->heap_size);
+ return 0;
+ }
+ }
+ pr_err("in %s please check the id %d\n", __func__, id);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(hisi_ion_get_heap_info);
+
+struct ion_device *get_ion_device(void)
+{
+ return hisi_ion_device;
+}
+EXPORT_SYMBOL(get_ion_device);
+
+static int get_id_by_name(const char *name, unsigned int *id)
+{
+ int i, n;
+
+ n = sizeof(name_id_table)/sizeof(name_id_table[0]);
+ for (i = 0; i < n; i++) {
+ if (strncmp(name, name_id_table[i].name, HISI_ION_NAME_LEN))
+ continue;
+
+ *id = name_id_table[i].id;
+ return 0;
+ }
+ return -1;
+}
+
+static int get_type_by_name(const char *name, enum ion_heap_type *type)
+{
+ int i, n;
+
+ n = sizeof(type_id_table)/sizeof(type_id_table[0]);
+ for (i = 0; i < n; i++) {
+ if (strncmp(name, type_id_table[i].name, HISI_ION_NAME_LEN))
+ continue;
+
+ *type = type_id_table[i].type;
+ return 0;
+ }
+
+ return -1;
+}
+
+static u64 hisi_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_cma_device = {
+ .name = "ion-cma-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &hisi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
+static int hisi_ion_setup_platform_data(struct platform_device *dev)
+{
+ struct device_node *node, *np;
+ const char *heap_name;
+ const char *type_name;
+ unsigned int id;
+ unsigned int range[2] = {0, 0};
+ enum ion_heap_type type;
+ int ret;
+ int index = 0;
+
+ node = dev->dev.of_node;
+ for_each_child_of_node(node, np) {
+ ret = of_property_read_string(np, "heap-name", &heap_name);
+ if (ret < 0) {
+ pr_err("in node %s please check the name property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+ ret = get_id_by_name(heap_name, &id);
+ if (ret < 0) {
+ pr_err("in node %s please check the name %s\n", __func__, heap_name);
+ continue;
+ }
+
+ ret = of_property_read_u32_array(np, "heap-range", range, ARRAY_SIZE(range));
+ if (ret < 0) {
+ pr_err("in node %s please check the range property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+
+ ret = of_property_read_string(np, "heap-type", &type_name);
+ if (ret < 0) {
+ pr_err("in node %s please check the type property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+ ret = get_type_by_name(type_name, &type);
+ if (ret < 0) {
+ pr_err("in node %s please check the type %s\n", __func__, type_name);
+ continue;
+ }
+
+ hisi_ion_platform_heap[index].name = heap_name;
+ hisi_ion_platform_heap[index].base = range[0];
+ hisi_ion_platform_heap[index].size = range[1];
+ hisi_ion_platform_heap[index].id = id;
+ hisi_ion_platform_heap[index].type = type;
+ if (type == ION_HEAP_TYPE_DMA) {
+ // ion_cma_device.dev.archdata.dma_ops = swiotlb_dma_ops;
+ hisi_ion_platform_heap[index].priv =
+ (void *)&ion_cma_device.dev;
+ }
+ index++;
+ }
+
+ hisi_ion_platform_data.nr = index;
+ hisi_ion_platform_data.heaps = hisi_ion_platform_heap;
+
+ return 0;
+}
+
+static int hisi_ion_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct ion_heap *heap;
+ struct ion_platform_heap *heap_data;
+
+ if (hisi_ion_setup_platform_data(pdev)) {
+ pr_err("hisi_ion_setup_platform_data is failed\n");
+ return -EINVAL;
+ }
+
+ hisi_ion_device = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(hisi_ion_device))
+ return PTR_ERR(hisi_ion_device);
+ /*
+ * create the heaps as specified in the board file
+ */
+ for (i = 0; i < hisi_ion_platform_data.nr; i++) {
+ heap_data = &hisi_ion_platform_data.heaps[i];
+ heap = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heap)) {
+ err = heap ? PTR_ERR(heap) : -ENOMEM;
+ goto out;
+ }
+
+ ion_device_add_heap(hisi_ion_device, heap);
+ hisi_ion_heap[i] = heap;
+ }
+ platform_set_drvdata(pdev, hisi_ion_device);
+
+ return 0;
+out:
+ for (i = 0; i < HISI_ION_HEAP_NUM; i++) {
+ if (!hisi_ion_heap[i])
+ continue;
+ ion_heap_destroy(hisi_ion_heap[i]);
+ hisi_ion_heap[i] = NULL;
+ }
+ return err;
+}
+
+static int hisi_ion_remove(struct platform_device *pdev)
+{
+ int i;
+
+ ion_device_destroy(hisi_ion_device);
+ for (i = 0; i < HISI_ION_HEAP_NUM; i++) {
+ if (!hisi_ion_heap[i])
+ continue;
+ ion_heap_destroy(hisi_ion_heap[i]);
+ hisi_ion_heap[i] = NULL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hisi_ion_match_table[] = {
+ {.compatible = "hisilicon,ion"},
+ {},
+};
+
+static struct platform_driver hisi_ion_driver = {
+ .probe = hisi_ion_probe,
+ .remove = hisi_ion_remove,
+ .driver = {
+ .name = "ion",
+ .of_match_table = hisi_ion_match_table,
+ },
+};
+
+module_platform_driver(hisi_ion_driver);
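A short sketch of how the exported heap query above might be used (illustrative only; ION_FB_HEAP_ID and struct ion_heap_info_data come from the hisi_ion header included above, and the field types are assumed from the pr_info format inside hisi_ion_get_heap_info):

    #include <linux/hisi_ion.h>
    #include <linux/printk.h>

    static int fb_heap_info_demo(void)
    {
            struct ion_heap_info_data data;
            int ret;

            ret = hisi_ion_get_heap_info(ION_FB_HEAP_ID, &data);
            if (ret)
                    return ret;     /* the fb heap is not described in the device tree */

            /* mirror the driver's own trace format for the returned placement */
            pr_info("fb heap %s: phy 0x%llx size %u\n",
                    data.name, data.heap_phy, data.heap_size);
            return 0;
    }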
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
deleted file mode 100644
index 7e7431d8d49f..000000000000
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
-
-union ion_ioctl_arg {
- struct ion_fd_data fd;
- struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
- struct ion_heap_query query;
-};
-
-static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
-{
- int ret = 0;
-
- switch (cmd) {
- case ION_IOC_HEAP_QUERY:
- ret = arg->query.reserved0 != 0;
- ret |= arg->query.reserved1 != 0;
- ret |= arg->query.reserved2 != 0;
- break;
- default:
- break;
- }
-
- return ret ? -EINVAL : 0;
-}
-
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
- switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
- default:
- return _IOC_DIR(cmd);
- }
-}
-
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ion_client *client = filp->private_data;
- struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
- int ret = 0;
- unsigned int dir;
- union ion_ioctl_arg data;
-
- dir = ion_ioctl_dir(cmd);
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- /*
- * The copy_from_user is unconditional here for both read and write
- * to do the validate. If there is no write for the ioctl, the
- * buffer is cleared
- */
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- ret = validate_ioctl_arg(cmd, &data);
- if (WARN_ON_ONCE(ret))
- return ret;
-
- if (!(dir & _IOC_WRITE))
- memset(&data, 0, sizeof(data));
-
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_handle *handle;
-
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
- data.allocation.heap_id_mask,
- data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- data.allocation.handle = handle->id;
-
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf_fd(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
- break;
- }
- case ION_IOC_HEAP_QUERY:
- ret = ion_query_heaps(client, &data.query);
- break;
- default:
- return -ENOTTY;
- }
-
- if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
- return -EFAULT;
- }
- }
- return ret;
-}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7ef02b..1f83162ab1c9 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -36,11 +36,91 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
-
+#include <asm/cacheflush.h>
+#include <linux/iommu.h>
+#include <linux/proc_fs.h>
+#include <linux/hisi/hisi_ion.h>
+#include <linux/hisi/ion-iommu.h>
+#include <linux/atomic.h>
#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @user_clients: list of all the clients created from userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+ int import;
+};
+
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
@@ -100,23 +180,24 @@ static void ion_buffer_add(struct ion_device *dev,
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
struct scatterlist *sg;
int i, ret;
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
buffer->heap = heap;
buffer->flags = flags;
kref_init(&buffer->ref);
+ buffer->iommu_map = NULL;
ret = heap->ops->allocate(heap, buffer, len, align, flags);
@@ -131,16 +212,19 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err2;
}
- if (buffer->sg_table == NULL) {
- WARN_ONCE(1, "This heap needs to set the sgtable");
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
ret = -EINVAL;
goto err1;
}
- table = buffer->sg_table;
- buffer->dev = dev;
- buffer->size = len;
-
+ buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
@@ -149,7 +233,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
- goto err1;
+ goto err;
}
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -183,6 +267,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
mutex_unlock(&dev->buffer_lock);
return buffer;
+err:
+ heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
@@ -192,8 +278,16 @@ err2:
void ion_buffer_destroy(struct ion_buffer *buffer)
{
+ if (buffer->iommu_map) {
+ pr_info("%s: iommu map not released, do unmap now!\n",
+ __func__);
+ buffer->heap->ops->unmap_iommu(buffer->iommu_map);
+ kfree(buffer->iommu_map);
+ buffer->iommu_map = NULL;
+ }
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -257,11 +351,11 @@ static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
struct ion_handle *handle;
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
@@ -270,6 +364,7 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
ion_buffer_get(buffer);
ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
+ handle->import = 0;
return handle;
}
@@ -297,14 +392,23 @@ static void ion_handle_destroy(struct kref *kref)
kfree(handle);
}
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
-int ion_handle_put_nolock(struct ion_handle *handle)
+static int ion_handle_put_nolock(struct ion_handle *handle)
{
- return kref_put(&handle->ref, ion_handle_destroy);
+ int ret;
+
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+
+ return ret;
}
int ion_handle_put(struct ion_handle *handle)
@@ -337,8 +441,8 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -350,7 +454,7 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
+ int id)
{
struct ion_handle *handle;
@@ -419,10 +523,20 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
*/
len = PAGE_ALIGN(len);
- if (!len)
+ if (!len) {
+ pr_err("%s: illegal len: 0x%lx\n", __func__, len);
return ERR_PTR(-EINVAL);
+ }
+ if (len > SZ_128M) {
+ pr_err("%s: size more than 32M(0x%lx), pid: %d, process name: %s\n",
+ __func__, len, current->pid, current->comm);
+ }
down_read(&dev->lock);
+
+ if ((heap_id_mask == 0x1 << ION_DRM_VCODEC_HEAP_ID) ||
+ (heap_id_mask == 0x1 << ION_DRM_GRALLOC_HEAP_ID))
+ heap_id_mask = 0x1 << ION_DRM_HEAP_ID;
plist_for_each_entry(heap, &dev->heaps, node) {
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
@@ -462,10 +576,15 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
}
EXPORT_SYMBOL(ion_alloc);
-void ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
- if (!ion_handle_validate(client, handle)) {
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
@@ -482,6 +601,32 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_free);
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
+ __func__, buffer->heap->name, buffer->heap->type);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -492,7 +637,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
- "heap->ops->map_kernel should return ERR_PTR on error"))
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -582,34 +727,158 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_unmap_kernel);
-static struct mutex debugfs_mutex;
-static struct rb_root *ion_root_client;
-static int is_client_alive(struct ion_client *client)
+static int do_iommu_map(struct ion_buffer *buffer,
+ struct iommu_map_format *format)
{
- struct rb_node *node;
- struct ion_client *tmp;
- struct ion_device *dev;
+ struct ion_iommu_map *map;
+ int ret = 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ map->format.prot = format->prot;
+ /* set tile format info */
+ map->format.is_tile = format->is_tile;
+ if (map->format.is_tile) {
+ map->format.phys_page_line = format->phys_page_line;
+ map->format.virt_page_line = format->virt_page_line;
+ map->format.header_size = format->header_size;
+ }
- node = ion_root_client->rb_node;
- dev = container_of(ion_root_client, struct ion_device, clients);
+ /* do iommu map */
+ ret = buffer->heap->ops->map_iommu(buffer, map);
+ if (ret) {
+ kfree(map);
+ return ret;
+ }
- down_read(&dev->lock);
- while (node) {
- tmp = rb_entry(node, struct ion_client, node);
- if (client < tmp) {
- node = node->rb_left;
- } else if (client > tmp) {
- node = node->rb_right;
+ /* init the map count as 1 */
+ kref_init(&map->ref);
+
+ /* bind iommu_map to buffer */
+ map->buffer = buffer;
+ buffer->iommu_map = map;
+
+ return 0;
+}
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ struct iommu_map_format *format)
+{
+ struct ion_buffer *buffer;
+ int ret = 0;
+
+ /* lock client */
+ mutex_lock(&client->lock);
+
+ /* check if the handle belongs to client. */
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to iommu map.\n", __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ /* lock buffer */
+ mutex_lock(&buffer->lock);
+
+ if (!handle->buffer->heap->ops->map_iommu) {
+ pr_err("%s: map_iommu is not implemented by this heap.\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* buffer size should be aligned to PAGE_SIZE */
+ if (buffer->size & ~PAGE_MASK) {
+ pr_err("%s: buffer size %lx is not aligned to %lx\n",
+ __func__, (unsigned long)(buffer->size), PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+
+ /* buffer->iommu_map != NULL means the buffer is already mapped */
+ if (buffer->iommu_map) {
+ struct iommu_map_format *mapped_fmt
+ = &buffer->iommu_map->format;
+
+ pr_debug("This buffer has already iommu mapped!\n");
+
+ /* map tile format should be same as last time */
+ if (format->is_tile && (
+ (format->phys_page_line != mapped_fmt->phys_page_line) ||
+ (format->virt_page_line != mapped_fmt->virt_page_line))) {
+ WARN(1, "map_iommu format do not match!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* increase iommu map count */
+ kref_get(&buffer->iommu_map->ref);
} else {
- up_read(&dev->lock);
- return 1;
+ /* do iommu map */
+ ret = do_iommu_map(buffer, format);
+ if (ret) {
+ goto out;
}
}
- up_read(&dev->lock);
- return 0;
+ memcpy(format, &buffer->iommu_map->format, sizeof(*format));
+
+out:
+ /* unlock buffer and unlock client */
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+static void do_iommu_unmap(struct kref *kref)
+{
+ struct ion_iommu_map *map
+ = container_of(kref, struct ion_iommu_map, ref);
+ struct ion_buffer *buffer = map->buffer;
+
+ buffer->heap->ops->unmap_iommu(map);
+
+ buffer->iommu_map = NULL;
+
+ kfree(map);
}
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+
+ /* check if the handle belongs to client. */
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to iommu unmap.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+
+ buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+
+ iommu_map = buffer->iommu_map;
+ if (!iommu_map) {
+ WARN(1, "This buffer have not been map iommu\n");
+ goto out;
+ }
+
+ kref_put(&iommu_map->ref, do_iommu_unmap);
+
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
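Illustrative sketch (not part of the diff): the intended pairing of the new ion_map_iommu()/ion_unmap_iommu() entry points. struct iommu_map_format comes from the HiSilicon ion-iommu header; iova_start/iova_size are the fields the ioctl path copies back, while leaving prot/is_tile at zero is an assumption standing in for platform-specific values.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/hisi/ion-iommu.h>
#include "ion.h"

static int example_map_for_device(struct ion_client *client,
				  struct ion_handle *handle)
{
	/* prot/is_tile left at defaults; real values are platform specific */
	struct iommu_map_format fmt = { 0 };
	int ret;

	/* First map creates the per-buffer mapping, later maps take a ref */
	ret = ion_map_iommu(client, handle, &fmt);
	if (ret)
		return ret;

	pr_info("iova 0x%lx size 0x%lx\n",
		(unsigned long)fmt.iova_start, (unsigned long)fmt.iova_size);

	/* ... program the device with fmt.iova_start ... */

	/* Drops the ref; the mapping is torn down when the count hits zero */
	ion_unmap_iommu(client, handle);
	return 0;
}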
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
@@ -618,14 +887,6 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
- mutex_lock(&debugfs_mutex);
- if (!is_client_alive(client)) {
- seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
- client);
- mutex_unlock(&debugfs_mutex);
- return 0;
- }
-
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -637,7 +898,6 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
sizes[id] += handle->buffer->size;
}
mutex_unlock(&client->lock);
- mutex_unlock(&debugfs_mutex);
seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
@@ -661,14 +921,14 @@ static const struct file_operations debug_client_fops = {
};
static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
+ const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
- node);
+ node);
if (strcmp(client->name, name))
continue;
@@ -707,7 +967,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
}
task_unlock(current->group_leader);
- client = kzalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
@@ -743,14 +1003,14 @@ struct ion_client *ion_client_create(struct ion_device *dev,
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
+ dev->clients_debug_root,
+ client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
+ path, client->display_name);
}
up_write(&dev->lock);
@@ -774,7 +1034,6 @@ void ion_client_destroy(struct ion_client *client)
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
@@ -793,10 +1052,29 @@ void ion_client_destroy(struct ion_client *client)
kfree(client->display_name);
kfree(client->name);
kfree(client);
- mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
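Illustrative sketch (not part of the diff): ion_sg_table() hands back the buffer's own scatterlist, so a typical consumer simply walks it with for_each_sg() and must not free it. The helper name is made up.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "ion.h"

static int example_walk_sglist(struct ion_client *client,
			       struct ion_handle *handle)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = ion_sg_table(client, handle);
	if (IS_ERR(table))
		return PTR_ERR(table);

	/* The table belongs to the buffer -- read-only walk, no freeing. */
	for_each_sg(table->sgl, sg, table->nents, i)
		pr_debug("chunk %d: page %p len %u\n",
			 i, sg_page(sg), sg->length);

	return 0;
}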
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
@@ -818,7 +1096,7 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
@@ -858,7 +1136,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
+ PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
@@ -895,7 +1173,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
- vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
@@ -936,7 +1214,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
+ __func__);
return -EINVAL;
}
@@ -968,6 +1246,9 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
+ if (buffer->iommu_map)
+ kref_put(&buffer->iommu_map->ref, do_iommu_unmap);
+
ion_buffer_put(buffer);
}
@@ -1002,14 +1283,13 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
-
return 0;
}
@@ -1027,7 +1307,7 @@ static struct dma_buf_ops dma_buf_ops = {
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+ struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
@@ -1043,6 +1323,8 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
}
buffer = handle->buffer;
ion_buffer_get(buffer);
+ if (buffer->iommu_map)
+ kref_get(&buffer->iommu_map->ref);
mutex_unlock(&client->lock);
exp_info.ops = &dma_buf_ops;
@@ -1077,18 +1359,22 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf)
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
+ struct dma_buf *dmabuf;
struct ion_buffer *buffer;
struct ion_handle *handle;
int ret;
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not import dmabuf from another exporter\n",
__func__);
+ dma_buf_put(dmabuf);
return ERR_PTR(-EINVAL);
}
buffer = dmabuf->priv;
@@ -1106,6 +1392,8 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client,
if (IS_ERR(handle)) {
mutex_unlock(&client->lock);
goto end;
+ } else {
+ handle->import = 1;
}
ret = ion_handle_add(client, handle);
@@ -1116,90 +1404,227 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client,
}
end:
+ dma_buf_put(dmabuf);
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
- struct ion_handle *handle;
+ struct ion_buffer *buffer;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
- return ERR_CAST(dmabuf);
+ return PTR_ERR(dmabuf);
- handle = ion_import_dma_buf(client, dmabuf);
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
dma_buf_put(dmabuf);
- return handle;
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
}
-EXPORT_SYMBOL(ion_import_dma_buf_fd);
-int ion_sync_for_device(struct ion_client *client, int fd)
+int ion_sync_for_cpu(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
+ if (IS_ERR_OR_NULL(dmabuf)) {
+ pr_err("%s: can't get dmabuf!\n", __func__);
return PTR_ERR(dmabuf);
+ }
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
+ __func__);
dma_buf_put(dmabuf);
return -EINVAL;
}
buffer = dmabuf->priv;
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ if (buffer->cpudraw_sg_table) {
+ dma_sync_sg_for_cpu(NULL,
+ buffer->cpudraw_sg_table->sgl,
+ buffer->cpudraw_sg_table->nents,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents,
+ DMA_FROM_DEVICE);
+ }
+
dma_buf_put(dmabuf);
return 0;
}
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
- struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
- int ret = -EINVAL, cnt = 0, max_cnt;
- struct ion_heap *heap;
- struct ion_heap_data hdata;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
- memset(&hdata, 0, sizeof(hdata));
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ struct ion_map_iommu_data map_iommu;
+ } data;
- down_read(&dev->lock);
- if (!buffer) {
- query->cnt = dev->heap_cnt;
- ret = 0;
- goto out;
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+ }
+ ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ break;
}
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
- if (query->cnt <= 0)
- goto out;
+ handle = ion_import_dma_buf(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
- max_cnt = query->cnt;
+ case ION_IOC_INV:
+ {
+ ion_sync_for_cpu(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_MAP_IOMMU:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.map_iommu.handle);
+ if (IS_ERR(handle)) {
+ pr_err("%s: map iommu but handle invalid!\n", __func__);
+ return PTR_ERR(handle);
+ }
- plist_for_each_entry(heap, &dev->heaps, node) {
- strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
- hdata.name[sizeof(hdata.name) - 1] = '\0';
- hdata.type = heap->type;
- hdata.heap_id = heap->id;
+ ret = ion_map_iommu(client, handle, &data.map_iommu.format);
- if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
- ret = -EFAULT;
- goto out;
+ ion_handle_put(handle);
+ break;
+ }
+ case ION_IOC_UNMAP_IOMMU:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.map_iommu.handle);
+ if (IS_ERR(handle)) {
+ pr_err("%s: map iommu but handle invalid!\n", __func__);
+ return PTR_ERR(handle);
}
- cnt++;
- if (cnt >= max_cnt)
- break;
+ ion_unmap_iommu(client, handle);
+ data.map_iommu.format.iova_start = 0;
+ data.map_iommu.format.iova_size = 0;
+
+ ion_handle_put(handle);
+ break;
+ }
+ default:
+ return -ENOTTY;
}
- query->cnt = cnt;
-out:
- up_read(&dev->lock);
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
return ret;
}
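Illustrative sketch (not part of the diff): the userspace side of the ioctl dispatch above, using the legacy ion uapi structures (struct ion_allocation_data, ion_fd_data, ion_handle_data). The header path, the heap_id_mask value and the /dev/ion node name are assumptions that vary per platform.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>		/* assumed location of the ion uapi header */

int example_alloc_and_share(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = 1 << 0,	/* heap ids are platform specific */
		.flags = 0,
	};
	struct ion_fd_data share = { 0 };
	struct ion_handle_data free_data = { 0 };
	int ion_fd, ret = -1;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0)
		return -1;

	/* ION_IOC_ALLOC copies the new handle id back into alloc.handle */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out;

	/* ION_IOC_SHARE turns the handle into a dma-buf fd */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto out_free;

	/* ... mmap() share.fd or pass it to a device ... */
	close(share.fd);
	ret = 0;

out_free:
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
out:
	close(ion_fd);
	return ret;
}

Note how ION_IOC_FREE carries only the handle id; the kernel side resolves and drops it under client->lock via the nolock helpers above.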
@@ -1266,7 +1691,6 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
- mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1285,8 +1709,6 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
client->pid, size);
}
}
- mutex_unlock(&debugfs_mutex);
-
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
@@ -1311,7 +1733,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
+ heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
@@ -1338,7 +1760,7 @@ static int debug_shrink_set(void *data, u64 val)
struct shrink_control sc;
int objs;
- sc.gfp_mask = GFP_HIGHUSER;
+ sc.gfp_mask = -1;
sc.nr_to_scan = val;
if (!val) {
@@ -1356,7 +1778,7 @@ static int debug_shrink_get(void *data, u64 *val)
struct shrink_control sc;
int objs;
- sc.gfp_mask = GFP_HIGHUSER;
+ sc.gfp_mask = -1;
sc.nr_to_scan = 0;
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
@@ -1371,7 +1793,8 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
- if (!heap->ops->allocate || !heap->ops->free)
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
@@ -1393,15 +1816,15 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
+ path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
@@ -1416,11 +1839,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
+ path, debug_name);
}
}
- dev->heap_cnt++;
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
@@ -1433,7 +1855,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
struct ion_device *idev;
int ret;
- idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
@@ -1471,8 +1893,6 @@ debugfs_done:
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
- ion_root_client = &idev->clients;
- mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
@@ -1485,3 +1905,38 @@ void ion_device_destroy(struct ion_device *dev)
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+ data->heaps[i].name,
+ data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
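Illustrative sketch (not part of the diff): ion_reserve() is meant to run from early boot (for example a machine's .reserve hook), before the buddy allocator is up. The heap table below is entirely hypothetical -- ids, names, addresses and sizes are placeholders.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include "ion.h"

/* Hypothetical board data: one carveout at a fixed address, one whose
 * base is picked by memblock because .base is left at 0. */
static struct ion_platform_heap example_heaps[] = {
	{
		.id    = 1,
		.type  = ION_HEAP_TYPE_CARVEOUT,
		.name  = "carveout-fixed",
		.base  = 0x40000000,
		.size  = SZ_16M,
	},
	{
		.id    = 2,
		.type  = ION_HEAP_TYPE_CARVEOUT,
		.name  = "carveout-dyn",
		.base  = 0,
		.size  = SZ_8M,
		.align = SZ_1M,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

static void __init example_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}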
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4586e4..5ef96dde3a82 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -73,6 +73,17 @@ struct ion_platform_data {
};
/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @name: used for debugging
@@ -119,6 +130,36 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table() - return the sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
@@ -151,26 +192,39 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_import_dma_buf() - get ion_handle from dma-buf
+ * ion_map_iommu() - create iommu mapping for the given handle
* @client: the client
- * @dmabuf: the dma-buf
- *
- * Get the ion_buffer associated with the dma-buf and return the ion_handle.
- * If no ion_handle exists for this buffer, return newly created ion_handle.
- * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL)
+ * @handle: the handle
+ * @format: the format of iommu mapping
+ */
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ struct iommu_map_format *format);
+
+/**
+ * ion_unmap_iommu() - destroy an iommu mapping for a handle
+ * @client: the client
+ * @handle: the handle
+ */
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_change_flags() - change buffer flags
+ * @client: the client
+ * @handle: the handle
+ * @flags: flags
*/
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf);
+int ion_change_flags(struct ion_client *client,
+ struct ion_handle *handle, int flags);
/**
- * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
* @client: the client
* @fd: the dma-buf fd
*
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
- * import that fd and return a handle representing it. If a dma-buf from
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
* another exporter is passed in this function will return ERR_PTR(-EINVAL)
*/
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
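Illustrative sketch (not part of the diff): with this signature change the importer takes the fd directly and balances dma_buf_get()/dma_buf_put() internally, so a driver only ever deals with the handle. The wrapper name is made up.

#include <linux/err.h>
#include "ion.h"

static struct ion_handle *example_import(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	/* Fails with -EINVAL if the fd was not exported by ion itself */
	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR(handle))
		return handle;

	/* Pair with ion_free() once the driver is done with the buffer */
	return handle;
}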
#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index a8ea97391c40..df1fc2697946 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,17 +25,15 @@
#include "ion.h"
#include "ion_priv.h"
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
};
-static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -47,8 +45,8 @@ static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
return offset;
}
-static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -58,6 +56,19 @@ static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
gen_pool_free(carveout_heap->pool, addr, size);
}
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
@@ -70,7 +81,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
if (align > PAGE_SIZE)
return -EINVAL;
- table = kmalloc(sizeof(*table), GFP_KERNEL);
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
@@ -84,7 +95,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
}
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
- buffer->sg_table = table;
+ buffer->priv_virt = table;
return 0;
@@ -98,7 +109,7 @@ err_free:
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
@@ -106,19 +117,35 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
kfree(table);
}
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
+ .map_iommu = ion_heap_map_iommu,
+ .unmap_iommu = ion_heap_unmap_iommu,
};
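Illustrative sketch (not part of the diff): wiring a carveout heap into an ion device with the ops table above. The heap id and name are placeholders, and the reserved base/size would normally come from ion_reserve() or a reserved-memory region.

#include <linux/err.h>
#include "ion.h"
#include "ion_priv.h"

static int example_add_carveout(struct ion_device *idev,
				ion_phys_addr_t base, size_t size)
{
	struct ion_platform_heap pdata = {
		.id   = 1,		/* platform-specific heap id */
		.type = ION_HEAP_TYPE_CARVEOUT,
		.name = "carveout",
		.base = base,
		.size = size,
	};
	struct ion_heap *heap;

	heap = ion_carveout_heap_create(&pdata);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(idev, heap);
	return 0;
}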
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
@@ -138,7 +165,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 70495dc645ea..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -34,9 +34,9 @@ struct ion_chunk_heap {
};
static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (allocated_size > chunk_heap->size - chunk_heap->allocated)
return -ENOMEM;
- table = kmalloc(sizeof(*table), GFP_KERNEL);
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -71,11 +71,11 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (!paddr)
goto err;
sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
- chunk_heap->chunk_size, 0);
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
- buffer->sg_table = table;
+ buffer->priv_virt = table;
chunk_heap->allocated += allocated_size;
return 0;
err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct scatterlist *sg;
int i;
unsigned long allocated_size;
@@ -106,7 +106,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
@@ -117,9 +117,22 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops chunk_heap_ops = {
.allocate = ion_chunk_heap_allocate,
.free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
@@ -141,7 +154,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
@@ -161,7 +174,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ chunk_heap->base, heap_data->size, heap_data->align);
return &chunk_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 6c7de74bc7ab..1103f837f576 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -78,7 +78,6 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
- buffer->sg_table = info->table;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
return 0;
@@ -106,6 +105,36 @@ static void ion_cma_free(struct ion_buffer *buffer)
kfree(info);
}
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
@@ -126,16 +155,21 @@ static void *ion_cma_map_kernel(struct ion_heap *heap,
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
}
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
+ .map_iommu = ion_heap_map_iommu,
+ .unmap_iommu = ion_heap_unmap_iommu,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c76c753..5678870bff48 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -68,8 +68,6 @@ static int __init ion_dummy_init(void)
int i, err;
idev = ion_device_create(NULL);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
GFP_KERNEL);
if (!heaps)
@@ -99,7 +97,7 @@ static int __init ion_dummy_init(void)
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
+ !heap_data->base)
continue;
if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
@@ -120,12 +118,12 @@ err:
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
return err;
@@ -144,12 +142,12 @@ static void __exit ion_dummy_exit(void)
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 4e5c0f17f579..74b026ae9b4d 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -22,6 +22,9 @@
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/hisi/ion-iommu.h>
+
#include "ion.h"
#include "ion_priv.h"
@@ -93,7 +96,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
}
len = min(len, remainder);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
+ vma->vm_page_prot);
if (ret)
return ret;
addr += len;
@@ -103,6 +106,31 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
+int ion_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *map_data)
+{
+ struct sg_table *table = buffer->sg_table;
+ int ret;
+
+ ret = hisi_iommu_map_domain(table->sgl, &map_data->format);
+ if (ret) {
+ pr_err("%s: iommu map failed, heap: %s\n", __func__,
+ buffer->heap->name);
+ }
+ return ret;
+}
+
+void ion_heap_unmap_iommu(struct ion_iommu_map *map_data)
+{
+ int ret;
+
+ ret = hisi_iommu_unmap_domain(&map_data->format);
+ if (ret) {
+ pr_err("%s: iommu unmap failed, heap: %s\n", __func__,
+ map_data->buffer->heap->name);
+ }
+}
+
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
void *addr = vm_map_ram(pages, num, -1, pgprot);
@@ -116,7 +144,7 @@ static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
- pgprot_t pgprot)
+ pgprot_t pgprot)
{
int p = 0;
int ret = 0;
@@ -181,7 +209,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -266,7 +294,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
@@ -279,7 +307,7 @@ static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
deleted file mode 100644
index 46b2bb99bfd6..000000000000
--- a/drivers/staging/android/ion/ion_of.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/cma.h>
-#include <linux/dma-contiguous.h>
-#include <linux/io.h>
-#include <linux/of_reserved_mem.h>
-#include "ion.h"
-#include "ion_priv.h"
-#include "ion_of.h"
-
-static int ion_parse_dt_heap_common(struct device_node *heap_node,
- struct ion_platform_heap *heap,
- struct ion_of_heap *compatible)
-{
- int i;
-
- for (i = 0; compatible[i].name; i++) {
- if (of_device_is_compatible(heap_node, compatible[i].compat))
- break;
- }
-
- if (!compatible[i].name)
- return -ENODEV;
-
- heap->id = compatible[i].heap_id;
- heap->type = compatible[i].type;
- heap->name = compatible[i].name;
- heap->align = compatible[i].align;
-
- /* Some kind of callback function pointer? */
-
- pr_info("%s: id %d type %d name %s align %lx\n", __func__,
- heap->id, heap->type, heap->name, heap->align);
- return 0;
-}
-
-static int ion_setup_heap_common(struct platform_device *parent,
- struct device_node *heap_node,
- struct ion_platform_heap *heap)
-{
- int ret = 0;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- case ION_HEAP_TYPE_CHUNK:
- if (heap->base && heap->size)
- return 0;
-
- ret = of_reserved_mem_device_init(heap->priv);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible)
-{
- int num_heaps, ret;
- const struct device_node *dt_node = pdev->dev.of_node;
- struct device_node *node;
- struct ion_platform_heap *heaps;
- struct ion_platform_data *data;
- int i = 0;
-
- num_heaps = of_get_available_child_count(dt_node);
-
- if (!num_heaps)
- return ERR_PTR(-EINVAL);
-
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap) * num_heaps,
- GFP_KERNEL);
- if (!heaps)
- return ERR_PTR(-ENOMEM);
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
- GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- for_each_available_child_of_node(dt_node, node) {
- struct platform_device *heap_pdev;
-
- ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
- if (ret)
- return ERR_PTR(ret);
-
- heap_pdev = of_platform_device_create(node, heaps[i].name,
- &pdev->dev);
- if (!heap_pdev)
- return ERR_PTR(-ENOMEM);
- heap_pdev->dev.platform_data = &heaps[i];
-
- heaps[i].priv = &heap_pdev->dev;
-
- ret = ion_setup_heap_common(pdev, node, &heaps[i]);
- if (ret)
- goto out_err;
- i++;
- }
-
- data->heaps = heaps;
- data->nr = num_heaps;
- return data;
-
-out_err:
- for ( ; i >= 0; i--)
- if (heaps[i].priv)
- of_device_unregister(to_platform_device(heaps[i].priv));
-
- return ERR_PTR(ret);
-}
-
-void ion_destroy_platform_data(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++)
- if (data->heaps[i].priv)
- of_device_unregister(to_platform_device(
- data->heaps[i].priv));
-}
-
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ion_platform_heap *heap = pdev->dev.platform_data;
-
- heap->base = rmem->base;
- heap->base = rmem->size;
- pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
- heap->name, &rmem->base, &rmem->size, dev);
- return 0;
-}
-
-static void rmem_ion_device_release(struct reserved_mem *rmem,
- struct device *dev)
-{
- return;
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
- .device_init = rmem_ion_device_init,
- .device_release = rmem_ion_device_release,
-};
-
-static int __init rmem_ion_setup(struct reserved_mem *rmem)
-{
- phys_addr_t size = rmem->size;
-
- size = size / 1024;
-
- pr_info("Ion memory setup at %pa size %pa MiB\n",
- &rmem->base, &size);
- rmem->ops = &rmem_dma_ops;
- return 0;
-}
-
-RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
-#endif
diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h
deleted file mode 100644
index 8241a1770f0a..000000000000
--- a/drivers/staging/android/ion/ion_of.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ION_OF_H
-#define _ION_OF_H
-
-struct ion_of_heap {
- const char *compat;
- int heap_id;
- int type;
- const char *name;
- int align;
-};
-
-#define PLATFORM_HEAP(_compat, _id, _type, _name) \
-{ \
- .compat = _compat, \
- .heap_id = _id, \
- .type = _type, \
- .name = _name, \
- .align = PAGE_SIZE, \
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible);
-
-void ion_destroy_platform_data(struct ion_platform_data *data);
-
-#endif
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index aea89c1ec345..59ee2f8f6761 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -30,15 +30,17 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- if (!pool->cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+ ion_page_pool_alloc_set_cache_policy(pool, page);
+
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
struct page *page)
{
+ ion_page_pool_free_set_cache_policy(pool, page);
__free_pages(page, pool->order);
}
@@ -104,6 +106,11 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
ion_page_pool_free_pages(pool, page);
}
+void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
+{
+ ion_page_pool_free_pages(pool, page);
+}
+
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
int count = pool->low_count;
@@ -115,7 +122,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
}
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
int freed = 0;
bool high;
@@ -148,11 +155,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
return freed;
}
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
- struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
-
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
if (!pool)
return NULL;
pool->high_count = 0;
@@ -163,8 +169,6 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
pool->order = order;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
- if (cached)
- pool->cached = true;
return pool;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 3c3b3245275d..39b1f4a78145 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,10 +26,21 @@
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
-#include <linux/miscdevice.h>
+#include <linux/iommu.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
#include "ion.h"
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+struct ion_iommu_map {
+ struct ion_buffer *buffer;
+ struct kref ref;
+ struct iommu_map_format format;
+};
+
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
@@ -41,6 +52,8 @@
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
@@ -66,7 +79,10 @@ struct ion_buffer {
unsigned long flags;
unsigned long private_flags;
size_t size;
- void *priv_virt;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
struct mutex lock;
int kmap_cnt;
void *vaddr;
@@ -78,88 +94,21 @@ struct ion_buffer {
int handle_count;
char task_comm[TASK_COMM_LEN];
pid_t pid;
+ struct ion_iommu_map *iommu_map;
+ /* used for sync & free when the buffer is allocated from the cpu draw heap */
+ struct sg_table *cpudraw_sg_table;
+ size_t cpu_buffer_size;
};
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
- int heap_cnt;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
* @free: free memory
+ * @phys get physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma map the memory for dma to a scatterlist
+ * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
@@ -176,10 +125,19 @@ struct ion_heap_ops {
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags);
void (*free)(struct ion_buffer *buffer);
+ int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table * (*map_dma)(struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
+ int (*map_iommu)(struct ion_buffer *buffer,
+ struct ion_iommu_map *map_data);
+ void (*unmap_iommu)(struct ion_iommu_map *map_data);
+ void (*buffer_zero)(struct ion_buffer *buffer);
int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
@@ -291,6 +249,10 @@ void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
struct vm_area_struct *);
+int ion_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *map_data);
+void ion_heap_unmap_iommu(struct ion_iommu_map *map_data);
+void ion_flush_all_cpus_caches(void);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
@@ -388,6 +350,20 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate allocation failure.
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
@@ -406,7 +382,6 @@ void ion_cma_heap_destroy(struct ion_heap *);
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
- * @cached: it's cached pool or not
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -416,7 +391,6 @@ void ion_cma_heap_destroy(struct ion_heap *);
struct ion_page_pool {
int high_count;
int low_count;
- bool cached;
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
@@ -425,11 +399,41 @@ struct ion_page_pool {
struct plist_node list;
};
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy
+ (struct ion_page_pool *pool,
+ struct page *page){
+ void *va = page_address(page);
+
+ if (va)
+ set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy
+ (struct ion_page_pool *pool,
+ struct page *page){
+ void *va = page_address(page);
+
+ if (va)
+ set_memory_wb((unsigned long)va, 1 << pool->order);
+
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy
+ (struct ion_page_pool *pool,
+ struct page *page){ }
+
+static inline void ion_page_pool_free_set_cache_policy
+ (struct ion_page_pool *pool,
+ struct page *page){ }
+#endif
+
/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
* @pool: the pool
@@ -452,22 +456,4 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_sync_for_device(struct ion_client *client, int fd);
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id);
-
-void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
-
-int ion_handle_put_nolock(struct ion_handle *handle);
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
-
-int ion_handle_put(struct ion_handle *handle);
-
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
-
#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d505af8..7392ec19ed9a 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,18 +26,16 @@
#include "ion.h"
#include "ion_priv.h"
-#define NUM_ORDERS ARRAY_SIZE(orders)
-
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
- __GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
+ __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
-
+static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
int i;
- for (i = 0; i < NUM_ORDERS; i++)
+ for (i = 0; i < num_orders; i++)
if (order == orders[i])
return i;
BUG();
@@ -51,55 +49,49 @@ static inline unsigned int order_to_size(int order)
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool *uncached_pools[NUM_ORDERS];
- struct ion_page_pool *cached_pools[NUM_ORDERS];
+ struct ion_page_pool *pools[0];
};
-/**
- * The page from page-pool are all zeroed before. We need do cache
- * clean for cached buffer. The uncached buffer are always non-cached
- * since it's allocated. So no need for non-cached pages.
- */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool;
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
struct page *page;
- if (!cached)
- pool = heap->uncached_pools[order_to_index(order)];
- else
- pool = heap->cached_pools[order_to_index(order)];
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
- page = ion_page_pool_alloc(pool);
-
- if (cached)
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags | __GFP_COMP, order);
+ if (!page)
+ return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
+ }
+
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page)
{
- struct ion_page_pool *pool;
unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
- /* go to system */
- if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+ ion_page_pool_free_immediate(pool, page);
+ else
+ ion_page_pool_free(pool, page);
+ } else {
__free_pages(page, order);
- return;
}
-
- if (!cached)
- pool = heap->uncached_pools[order_to_index(order)];
- else
- pool = heap->cached_pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
}
@@ -111,7 +103,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
struct page *page;
int i;
- for (i = 0; i < NUM_ORDERS; i++) {
+ for (i = 0; i < num_orders; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
@@ -128,9 +120,9 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
}
static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
@@ -152,7 +144,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
page = alloc_largest_available(sys_heap, buffer, size_remaining,
- max_order);
+ max_order);
if (!page)
goto free_pages;
list_add_tail(&page->lru, &pages);
@@ -174,7 +166,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
list_del(&page->lru);
}
- buffer->sg_table = table;
+ buffer->priv_virt = table;
return 0;
free_table:
@@ -191,11 +183,16 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
int i;
- /* zero the buffer before goto page pool */
- if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+ /*
+ * Uncached pages come from the page pools; zero them before returning
+ * them for security purposes (other allocations are zeroed at
+ * alloc time).
+ */
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -204,11 +201,20 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
- struct ion_page_pool *uncached_pool;
- struct ion_page_pool *cached_pool;
struct ion_system_heap *sys_heap;
int nr_total = 0;
int i, nr_freed;
@@ -219,44 +225,33 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
if (!nr_to_scan)
only_scan = 1;
- for (i = 0; i < NUM_ORDERS; i++) {
- uncached_pool = sys_heap->uncached_pools[i];
- cached_pool = sys_heap->cached_pools[i];
-
- if (only_scan) {
- nr_total += ion_page_pool_shrink(uncached_pool,
- gfp_mask,
- nr_to_scan);
-
- nr_total += ion_page_pool_shrink(cached_pool,
- gfp_mask,
- nr_to_scan);
- } else {
- nr_freed = ion_page_pool_shrink(uncached_pool,
- gfp_mask,
- nr_to_scan);
- nr_to_scan -= nr_freed;
- nr_total += nr_freed;
- if (nr_to_scan <= 0)
- break;
- nr_freed = ion_page_pool_shrink(cached_pool,
- gfp_mask,
- nr_to_scan);
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ nr_total += nr_freed;
+
+ if (!only_scan) {
nr_to_scan -= nr_freed;
- nr_total += nr_freed;
+ /* shrink completed */
if (nr_to_scan <= 0)
break;
}
}
+
return nr_total;
}
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
+ .map_iommu = ion_heap_map_iommu,
+ .unmap_iommu = ion_heap_unmap_iommu,
.shrink = ion_system_heap_shrink,
};
@@ -268,89 +263,52 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
struct ion_system_heap,
heap);
int i;
- struct ion_page_pool *pool;
- for (i = 0; i < NUM_ORDERS; i++) {
- pool = sys_heap->uncached_pools[i];
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
- seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
pool->high_count, pool->order,
(PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
pool->low_count, pool->order,
(PAGE_SIZE << pool->order) * pool->low_count);
}
-
- for (i = 0; i < NUM_ORDERS; i++) {
- pool = sys_heap->cached_pools[i];
-
- seq_printf(s, "%d order %u highmem pages cached %lu total\n",
- pool->high_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
- pool->low_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->low_count);
- }
- return 0;
-}
-
-static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
-{
- int i;
-
- for (i = 0; i < NUM_ORDERS; i++)
- if (pools[i])
- ion_page_pool_destroy(pools[i]);
-}
-
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
- bool cached)
-{
- int i;
- gfp_t gfp_flags = low_order_gfp_flags;
-
- for (i = 0; i < NUM_ORDERS; i++) {
- struct ion_page_pool *pool;
-
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
-
- pool = ion_page_pool_create(gfp_flags, orders[i], cached);
- if (!pool)
- goto err_create_pool;
- pools[i] = pool;
- }
return 0;
-
-err_create_pool:
- ion_system_heap_destroy_pools(pools);
- return -ENOMEM;
}
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
+ int i;
- heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(struct ion_system_heap) +
+ sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->heap.ops = &system_heap_ops;
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- if (ion_system_heap_create_pools(heap->uncached_pools, false))
- goto free_heap;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
- if (ion_system_heap_create_pools(heap->cached_pools, true))
- goto destroy_uncached_pools;
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto destroy_pools;
+ heap->pools[i] = pool;
+ }
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-destroy_uncached_pools:
- ion_system_heap_destroy_pools(heap->uncached_pools);
-
-free_heap:
+destroy_pools:
+ while (i--)
+ ion_page_pool_destroy(heap->pools[i]);
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -362,10 +320,8 @@ void ion_system_heap_destroy(struct ion_heap *heap)
heap);
int i;
- for (i = 0; i < NUM_ORDERS; i++) {
- ion_page_pool_destroy(sys_heap->uncached_pools[i]);
- ion_page_pool_destroy(sys_heap->cached_pools[i]);
- }
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
kfree(sys_heap);
}
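
To make the single-pool-table layout above easier to follow: the pools[] member is a trailing zero-length array, so ion_system_heap_create() sizes one kzalloc() to cover both the heap struct and its pointer table. A minimal userspace sketch of the same pattern (heap_sketch and heap_sketch_create are invented names for illustration):

#include <stdlib.h>

struct pool;                            /* stand-in for struct ion_page_pool */

struct heap_sketch {
        int num_pools;
        struct pool *pools[];           /* spelled pools[0] in the older C above */
};

static struct heap_sketch *heap_sketch_create(int num_pools)
{
        /* one allocation covers the header and the pointer table */
        struct heap_sketch *h = calloc(1, sizeof(*h) +
                                          sizeof(struct pool *) * num_pools);

        if (!h)
                return NULL;
        h->num_pools = num_pools;
        return h;
}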
@@ -406,7 +362,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
sg_set_page(table->sgl, page, len, 0);
- buffer->sg_table = table;
+ buffer->priv_virt = table;
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
@@ -423,7 +379,7 @@ free_pages:
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
unsigned long i;
@@ -434,9 +390,34 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
kfree(table);
}
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 5abf8320a96a..58d46893e5ff 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -42,8 +42,7 @@ struct ion_test_data {
};
static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size,
- bool write)
+ void __user *ptr, size_t offset, size_t size, bool write)
{
int ret = 0;
struct dma_buf_attachment *attach;
@@ -99,7 +98,7 @@ err:
}
static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
+ size_t offset, size_t size, bool write)
{
int ret;
unsigned long page_offset = offset >> PAGE_SHIFT;
@@ -110,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
if (offset > dma_buf->size || size > dma_buf->size - offset)
return -EINVAL;
- ret = dma_buf_begin_cpu_access(dma_buf, dir);
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
if (ret)
return ret;
@@ -140,12 +139,12 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
copy_offset = 0;
}
err:
- dma_buf_end_cpu_access(dma_buf, dir);
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
return ret;
}
static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct ion_test_data *test_data = filp->private_data;
int ret = 0;
@@ -180,19 +179,17 @@ static long ion_test_ioctl(struct file *filp, unsigned int cmd,
case ION_IOC_TEST_DMA_MAPPING:
{
ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
break;
}
case ION_IOC_TEST_KERNEL_MAPPING:
{
ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
break;
}
default:
@@ -211,7 +208,7 @@ static int ion_test_open(struct inode *inode, struct file *file)
struct ion_test_data *data;
struct miscdevice *miscdev = file->private_data;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -245,7 +242,7 @@ static int __init ion_test_probe(struct platform_device *pdev)
struct ion_test_device *testdev;
testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!testdev)
return -ENOMEM;
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 49e55e5acead..4d3c516cc15e 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -33,11 +33,12 @@ static int tegra_ion_probe(struct platform_device *pdev)
num_heaps = pdata->nr;
- heaps = devm_kcalloc(&pdev->dev, pdata->nr,
- sizeof(struct ion_heap *), GFP_KERNEL);
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_heap *) * pdata->nr,
+ GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR(idev))
+ if (IS_ERR_OR_NULL(idev))
return PTR_ERR(idev);
/* create the heaps as specified in the board file */
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 14cd8738ecfc..4ebc07824f68 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -19,6 +19,7 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/iommu.h>
typedef int ion_user_handle_t;
@@ -40,30 +41,45 @@ enum ion_heap_type {
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
- ION_HEAP_TYPE_CUSTOM, /*
- * must be last so device specific heaps always
- * are at the end of this enum
- */
+ ION_HEAP_TYPE_DMA_POOL,
+ ION_HEAP_TYPE_CPUDRAW,
+ ION_HEAP_TYPE_IOMMU,
+ ION_HEAP_TYPE_SECCM,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_HEAP_TYPE_RESERVED,
+ ION_NUM_HEAPS = 16,
};
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+#define ION_HEAP_TYPE_DMA_POOL_MASK (1 << ION_HEAP_TYPE_DMA_POOL)
+#define ION_HEAP_CPUDRAW_MASK (1 << ION_HEAP_TYPE_CPUDRAW)
+#define ION_HEAP_TYPE_IOMMU_MASK (1 << ION_HEAP_TYPE_IOMMU)
+
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
+#define ION_FLAG_CACHED (0x1 << 0) /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC (0x1 << 1) /* mappings of this buffer
+ will be created at mmap time;
+ if this is set, caches
+ must be managed manually */
+#define ION_FLAG_NOT_ZERO_BUFFER (0x1 << 2) /* do not zero the buffer */
-/*
- * mappings of this buffer should be cached, ion will do cache maintenance
- * when the buffer is mapped for dma
- */
-#define ION_FLAG_CACHED 1
+#define ION_FLAG_SECURE_BUFFER (0x1 << 3)
-/*
- * mappings of this buffer will created at mmap time, if this is set
- * caches must be managed manually
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2
+#define ION_FLAG_GRAPHIC_BUFFER (0x1 << 4)
+
+#define ION_FLAG_GRAPHIC_GPU_BUFFER (0x1 << 5)
/**
* DOC: Ion Userspace API
@@ -128,36 +144,6 @@ struct ion_custom_data {
unsigned long arg;
};
-#define MAX_HEAP_NAME 32
-
-/**
- * struct ion_heap_data - data about a heap
- * @name - first 32 characters of the heap name
- * @type - heap type
- * @heap_id - heap id for the heap
- */
-struct ion_heap_data {
- char name[MAX_HEAP_NAME];
- __u32 type;
- __u32 heap_id;
- __u32 reserved0;
- __u32 reserved1;
- __u32 reserved2;
-};
-
-/**
- * struct ion_heap_query - collection of data about all heaps
- * @cnt - total number of heaps to be copied
- * @heaps - buffer to copy heap data
- */
-struct ion_heap_query {
- __u32 cnt; /* Total number of heaps to be copied */
- __u32 reserved0; /* align to 64bits */
- __u64 heaps; /* buffer to be populated */
- __u32 reserved1;
- __u32 reserved2;
-};
-
#define ION_IOC_MAGIC 'I'
/**
@@ -225,12 +211,33 @@ struct ion_heap_query {
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
/**
- * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ * struct ion_map_iommu_data - metadata passed from userspace for an iommu mapping
+ * @handle: the handle of the buffer
+ * @format: the format of the iommu mapping
*
- * Takes an ion_heap_query structure and populates information about
- * available Ion heaps.
+ * Provided by userspace as an argument to the ioctl
+ */
+struct iommu_map_format;
+struct ion_map_iommu_data {
+ ion_user_handle_t handle;
+ struct iommu_map_format format;
+};
+
+/**
+ * DOC: ION_IOC_MAP_IOMMU - map a buffer to an iova
+ */
+#define ION_IOC_MAP_IOMMU _IOWR(ION_IOC_MAGIC, \
+ 8, struct ion_map_iommu_data)
+
+/**
+ * DOC: ION_IOC_UNMAP_IOMMU - destroy the iommu mapping of a buffer
+ */
+#define ION_IOC_UNMAP_IOMMU _IOWR(ION_IOC_MAGIC, \
+ 9, struct ion_map_iommu_data)
+
+/**
+ * DOC: ION_IOC_INV - invalidate a shared file descriptor's buffer in the cache
*/
-#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
- struct ion_heap_query)
+#define ION_IOC_INV _IOWR(ION_IOC_MAGIC, 10, struct ion_fd_data)
#endif /* _UAPI_LINUX_ION_H */
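
A hypothetical userspace sketch of how the heap masks and cache flag added above are typically consumed, assuming the legacy ION_IOC_ALLOC/ION_IOC_SHARE ioctls and structs defined earlier in this header (not shown in this hunk); alloc_cached_system_buffer is an invented name:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/ion.h>

/* Allocate a cached system-heap buffer and export it as a dma-buf fd. */
static int alloc_cached_system_buffer(size_t len)
{
        struct ion_allocation_data alloc = {
                .len = len,
                .heap_id_mask = ION_HEAP_SYSTEM_MASK,
                .flags = ION_FLAG_CACHED,
        };
        struct ion_fd_data share;
        int ion_fd = open("/dev/ion", O_RDWR);

        if (ion_fd < 0)
                return -1;
        if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
                return -1;

        share.handle = alloc.handle;
        if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
                return -1;

        return share.fd;                /* shareable, mmap-able dma-buf fd */
}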
diff --git a/drivers/staging/nanohub/Kconfig b/drivers/staging/nanohub/Kconfig
new file mode 100644
index 000000000000..811506488ff7
--- /dev/null
+++ b/drivers/staging/nanohub/Kconfig
@@ -0,0 +1,22 @@
+config NANOHUB
+ tristate "Nanohub"
+ select IIO
+ default n
+ help
+ Enable support for the nanohub sensorhub driver.
+
+ This driver supports the Android nanohub sensorhub.
+
+ If in doubt, say N here.
+
+if NANOHUB
+
+config NANOHUB_SPI
+ bool "Nanohub SPI"
+ default y
+ help
+ Enable nanohub SPI support.
+
+ If in doubt, say Y here.
+
+endif # NANOHUB
diff --git a/drivers/staging/nanohub/Makefile b/drivers/staging/nanohub/Makefile
new file mode 100644
index 000000000000..950cae2b5c6a
--- /dev/null
+++ b/drivers/staging/nanohub/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for nanohub
+#
+
+obj-$(CONFIG_NANOHUB) += nanohub.o
+nanohub-y := main.o bl.o comms.o
+nanohub-$(CONFIG_NANOHUB_SPI) += spi.o
diff --git a/drivers/staging/nanohub/bl.c b/drivers/staging/nanohub/bl.c
new file mode 100644
index 000000000000..9a657f420cbf
--- /dev/null
+++ b/drivers/staging/nanohub/bl.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_data/nanohub.h>
+#include <linux/vmalloc.h>
+
+#include "main.h"
+#include "bl.h"
+
+#define MAX_BUFFER_SIZE 1024
+#define MAX_FLASH_BANKS 16
+#define READ_ACK_TIMEOUT 100000
+
+static uint8_t write_len(struct nanohub_data *data, int len)
+{
+ uint8_t buffer[sizeof(uint8_t) + 1];
+
+ buffer[0] = len - 1;
+
+ return data->bl.write_data(data, buffer, sizeof(uint8_t));
+}
+
+static uint8_t write_cnt(struct nanohub_data *data, uint16_t cnt)
+{
+ uint8_t buffer[sizeof(uint16_t) + 1];
+
+ buffer[0] = (cnt >> 8) & 0xFF;
+ buffer[1] = (cnt >> 0) & 0xFF;
+
+ return data->bl.write_data(data, buffer, sizeof(uint16_t));
+}
+
+static uint8_t write_addr(struct nanohub_data *data, uint32_t addr)
+{
+ uint8_t buffer[sizeof(uint32_t) + 1];
+
+ buffer[0] = (addr >> 24) & 0xFF;
+ buffer[1] = (addr >> 16) & 0xFF;
+ buffer[2] = (addr >> 8) & 0xFF;
+ buffer[3] = addr & 0xFF;
+
+ return data->bl.write_data(data, buffer, sizeof(uint32_t));
+}
+
+/* write length followed by the data */
+static uint8_t write_len_data(struct nanohub_data *data, int len,
+ const uint8_t *buf)
+{
+ uint8_t buffer[sizeof(uint8_t) + 256 + sizeof(uint8_t)];
+
+ buffer[0] = len - 1;
+
+ memcpy(&buffer[1], buf, len);
+
+ return data->bl.write_data(data, buffer, sizeof(uint8_t) + len);
+}
+
+/* keep checking for ack until we receive a ack or nack */
+static uint8_t read_ack_loop(struct nanohub_data *data)
+{
+ uint8_t ret;
+ int32_t timeout = READ_ACK_TIMEOUT;
+
+ do {
+ ret = data->bl.read_ack(data);
+ if (ret != CMD_ACK && ret != CMD_NACK)
+ schedule();
+ } while (ret != CMD_ACK && ret != CMD_NACK && timeout-- > 0);
+
+ return ret;
+}
+
+uint8_t nanohub_bl_sync(struct nanohub_data *data)
+{
+ return data->bl.sync(data);
+}
+
+int nanohub_bl_open(struct nanohub_data *data)
+{
+ int ret = -1;
+
+ data->bl.tx_buffer = kmalloc(MAX_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!data->bl.tx_buffer)
+ goto out;
+
+ data->bl.rx_buffer = kmalloc(MAX_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!data->bl.rx_buffer)
+ goto free_tx;
+
+ ret = data->bl.open(data);
+ if (!ret)
+ goto out;
+
+ kfree(data->bl.rx_buffer);
+free_tx:
+ kfree(data->bl.tx_buffer);
+out:
+ return ret;
+}
+
+void nanohub_bl_close(struct nanohub_data *data)
+{
+ data->bl.close(data);
+ kfree(data->bl.tx_buffer);
+ kfree(data->bl.rx_buffer);
+}
+
+static uint8_t write_bank(struct nanohub_data *data, int bank, uint32_t addr,
+ const uint8_t *buf, size_t length)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+ uint8_t status = CMD_ACK;
+ uint32_t offset;
+
+ if (addr <= pdata->flash_banks[bank].address) {
+ offset = pdata->flash_banks[bank].address - addr;
+ if (addr + length >
+ pdata->flash_banks[bank].address +
+ pdata->flash_banks[bank].length)
+ status =
+ nanohub_bl_write_memory(data,
+ pdata->flash_banks[bank].
+ address,
+ pdata->flash_banks[bank].
+ length, buf + offset);
+ else
+ status =
+ nanohub_bl_write_memory(data,
+ pdata->flash_banks[bank].
+ address, length - offset,
+ buf + offset);
+ } else {
+ if (addr + length >
+ pdata->flash_banks[bank].address +
+ pdata->flash_banks[bank].length)
+ status =
+ nanohub_bl_write_memory(data, addr,
+ pdata->flash_banks[bank].
+ address +
+ pdata->flash_banks[bank].
+ length - addr, buf);
+ else
+ status =
+ nanohub_bl_write_memory(data, addr, length, buf);
+ }
+
+ return status;
+}
+
+uint8_t nanohub_bl_download(struct nanohub_data *data, uint32_t addr,
+ const uint8_t *image, size_t length)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+ uint8_t *ptr;
+ int i, j;
+ uint8_t status;
+ uint32_t offset;
+ uint8_t erase_mask[MAX_FLASH_BANKS] = { 0 };
+ uint8_t erase_write_mask[MAX_FLASH_BANKS] = { 0 };
+ uint8_t write_mask[MAX_FLASH_BANKS] = { 0 };
+
+ if (pdata->num_flash_banks > MAX_FLASH_BANKS) {
+ status = CMD_NACK;
+ goto out;
+ }
+
+ status = nanohub_bl_sync(data);
+
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_download: sync=%02x\n", status);
+ goto out;
+ }
+
+ ptr = vmalloc(length);
+ if (!ptr) {
+ status = CMD_NACK;
+ goto out;
+ }
+
+ status = nanohub_bl_read_memory(data, addr, length, ptr);
+ pr_info(
+ "nanohub: nanohub_bl_read_memory: status=%02x, addr=%08x, length=%zd\n",
+ status, addr, length);
+
+ for (i = 0; i < pdata->num_flash_banks; i++) {
+ if (addr >= pdata->flash_banks[i].address &&
+ addr <
+ pdata->flash_banks[i].address +
+ pdata->flash_banks[i].length) {
+ break;
+ }
+ }
+
+ offset = (uint32_t) (addr - pdata->flash_banks[i].address);
+ j = 0;
+ while (j < length && i < pdata->num_flash_banks) {
+ if (image[j] != 0xFF)
+ erase_write_mask[i] = true;
+
+ if ((ptr[j] & image[j]) != image[j]) {
+ erase_mask[i] = true;
+ if (erase_write_mask[i]) {
+ j += pdata->flash_banks[i].length - offset;
+ offset = pdata->flash_banks[i].length;
+ } else {
+ j++;
+ offset++;
+ }
+ } else {
+ if (ptr[j] != image[j])
+ write_mask[i] = true;
+ j++;
+ offset++;
+ }
+
+ if (offset == pdata->flash_banks[i].length) {
+ i++;
+ offset = 0;
+ if (i < pdata->num_flash_banks)
+ j += (pdata->flash_banks[i].address -
+ pdata->flash_banks[i - 1].address -
+ pdata->flash_banks[i - 1].length);
+ else
+ j = length;
+ }
+ }
+
+ for (i = 0; status == CMD_ACK && i < pdata->num_flash_banks; i++) {
+ pr_info("nanohub: i=%d, erase=%d, erase_write=%d, write=%d\n",
+ i, erase_mask[i], erase_write_mask[i], write_mask[i]);
+ if (erase_mask[i]) {
+ status =
+ nanohub_bl_erase_sector(data,
+ pdata->flash_banks[i].bank);
+ if (status == CMD_ACK && erase_write_mask[i])
+ status =
+ write_bank(data, i, addr, image, length);
+ } else if (write_mask[i]) {
+ status = write_bank(data, i, addr, image, length);
+ }
+ }
+
+ vfree(ptr);
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_erase_shared(struct nanohub_data *data)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+ int i;
+ uint8_t status;
+
+ if (pdata->num_shared_flash_banks > MAX_FLASH_BANKS) {
+ status = CMD_NACK;
+ goto out;
+ }
+
+ status = nanohub_bl_sync(data);
+
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_erase_shared: sync=%02x\n", status);
+ goto out;
+ }
+
+ for (i = 0;
+ status == CMD_ACK && i < pdata->num_shared_flash_banks;
+ i++) {
+ status = nanohub_bl_erase_sector(data,
+ pdata->shared_flash_banks[i].bank);
+ }
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_erase_shared_bl(struct nanohub_data *data)
+{
+ uint8_t status;
+
+ status = nanohub_bl_sync(data);
+
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_erase_shared_bl: sync=%02x\n", status);
+ goto out;
+ }
+
+ status = nanohub_bl_erase_special(data, 0xFFF0);
+out:
+ return status;
+}
+
+/* erase a single sector */
+uint8_t nanohub_bl_erase_sector(struct nanohub_data *data, uint16_t sector)
+{
+ uint8_t ret;
+
+ data->bl.write_cmd(data, data->bl.cmd_erase);
+ ret = data->bl.read_ack(data);
+ if (ret == CMD_ACK)
+ ret = write_cnt(data, 0x0000);
+ if (ret != CMD_NACK)
+ ret = read_ack_loop(data);
+ if (ret == CMD_ACK)
+ ret = write_cnt(data, sector);
+ if (ret != CMD_NACK)
+ ret = read_ack_loop(data);
+
+ return ret;
+}
+
+/* erase special */
+uint8_t nanohub_bl_erase_special(struct nanohub_data *data, uint16_t special)
+{
+ uint8_t ret;
+
+ data->bl.write_cmd(data, data->bl.cmd_erase);
+ ret = data->bl.read_ack(data);
+ if (ret == CMD_ACK)
+ ret = write_cnt(data, special);
+ if (ret != CMD_NACK)
+ ret = read_ack_loop(data);
+
+ return ret;
+}
+
+/* read memory - this will chop the request into 256 byte reads */
+uint8_t nanohub_bl_read_memory(struct nanohub_data *data, uint32_t addr,
+ uint32_t length, uint8_t *buffer)
+{
+ uint8_t ret = CMD_ACK;
+ uint32_t offset = 0;
+
+ while (ret == CMD_ACK && length > offset) {
+ data->bl.write_cmd(data, data->bl.cmd_read_memory);
+ ret = data->bl.read_ack(data);
+ if (ret == CMD_ACK) {
+ write_addr(data, addr + offset);
+ ret = read_ack_loop(data);
+ if (ret == CMD_ACK) {
+ if (length - offset >= 256) {
+ write_len(data, 256);
+ ret = read_ack_loop(data);
+ if (ret == CMD_ACK) {
+ data->bl.read_data(data,
+ &buffer
+ [offset],
+ 256);
+ offset += 256;
+ }
+ } else {
+ write_len(data, length - offset);
+ ret = read_ack_loop(data);
+ if (ret == CMD_ACK) {
+ data->bl.read_data(data,
+ &buffer
+ [offset],
+ length -
+ offset);
+ offset = length;
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* write memory - this will chop the request into 256 byte writes */
+uint8_t nanohub_bl_write_memory(struct nanohub_data *data, uint32_t addr,
+ uint32_t length, const uint8_t *buffer)
+{
+ uint8_t ret = CMD_ACK;
+ uint32_t offset = 0;
+
+ while (ret == CMD_ACK && length > offset) {
+ data->bl.write_cmd(data, data->bl.cmd_write_memory);
+ ret = data->bl.read_ack(data);
+ if (ret == CMD_ACK) {
+ write_addr(data, addr + offset);
+ ret = read_ack_loop(data);
+ if (ret == CMD_ACK) {
+ if (length - offset >= 256) {
+ write_len_data(data, 256,
+ &buffer[offset]);
+ offset += 256;
+ } else {
+ write_len_data(data, length - offset,
+ &buffer[offset]);
+ offset = length;
+ }
+ ret = read_ack_loop(data);
+ }
+ }
+ }
+
+ return ret;
+}
+
+uint8_t nanohub_bl_get_version(struct nanohub_data *data, uint8_t *version)
+{
+ uint8_t status;
+
+ status = nanohub_bl_sync(data);
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_get_version: sync=%02x\n", status);
+ goto out;
+ }
+
+ data->bl.write_cmd(data, data->bl.cmd_get_version);
+ status = data->bl.read_ack(data);
+ if (status == CMD_ACK)
+ data->bl.read_data(data, version, 1);
+ status = data->bl.read_ack(data);
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_get_id(struct nanohub_data *data, uint16_t *id)
+{
+ uint8_t status;
+ uint8_t len;
+ uint8_t buffer[256];
+
+ status = nanohub_bl_sync(data);
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_get_id: sync=%02x\n", status);
+ goto out;
+ }
+
+ data->bl.write_cmd(data, data->bl.cmd_get_id);
+ status = data->bl.read_ack(data);
+ if (status == CMD_ACK) {
+ data->bl.read_data(data, &len, 1);
+ data->bl.read_data(data, buffer, len+1);
+ *id = (buffer[0] << 8) | buffer[1];
+ }
+ status = data->bl.read_ack(data);
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_lock(struct nanohub_data *data)
+{
+ uint8_t status;
+
+ status = nanohub_bl_sync(data);
+
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_lock: sync=%02x\n", status);
+ goto out;
+ }
+
+ data->bl.write_cmd(data, data->bl.cmd_readout_protect);
+ status = data->bl.read_ack(data);
+ if (status == CMD_ACK)
+ status = read_ack_loop(data);
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_unlock(struct nanohub_data *data)
+{
+ uint8_t status;
+
+ status = nanohub_bl_sync(data);
+
+ if (status != CMD_ACK) {
+ pr_err("nanohub_bl_lock: sync=%02x\n", status);
+ goto out;
+ }
+
+ data->bl.write_cmd(data, data->bl.cmd_readout_unprotect);
+ status = data->bl.read_ack(data);
+ if (status == CMD_ACK)
+ status = read_ack_loop(data);
+out:
+ return status;
+}
+
+uint8_t nanohub_bl_update_finished(struct nanohub_data *data)
+{
+ uint8_t ret;
+
+ data->bl.write_cmd(data, data->bl.cmd_update_finished);
+ ret = read_ack_loop(data);
+
+ return ret;
+}
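
A hypothetical caller sketch tying the bootloader API above together (flash_shared_image is an invented name; the real sequencing lives elsewhere in the driver and may differ): open the transport, erase the shared area, push the image, then signal completion.

static int flash_shared_image(struct nanohub_data *data, uint32_t addr,
                              const uint8_t *image, size_t len)
{
        uint8_t status;
        int ret = nanohub_bl_open(data);

        if (ret)
                return ret;

        status = nanohub_bl_erase_shared(data);
        if (status == CMD_ACK)
                status = nanohub_bl_download(data, addr, image, len);
        if (status == CMD_ACK)
                status = nanohub_bl_update_finished(data);

        nanohub_bl_close(data);
        return status == CMD_ACK ? 0 : -EIO;
}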
diff --git a/drivers/staging/nanohub/bl.h b/drivers/staging/nanohub/bl.h
new file mode 100644
index 000000000000..9ed8c403d865
--- /dev/null
+++ b/drivers/staging/nanohub/bl.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_BL_H
+#define _NANOHUB_BL_H
+
+#include <linux/platform_data/nanohub.h>
+#include <linux/spi/spi.h>
+
+struct nanohub_data;
+
+struct nanohub_bl {
+ uint8_t cmd_erase;
+ uint8_t cmd_read_memory;
+ uint8_t cmd_write_memory;
+ uint8_t cmd_get_version;
+ uint8_t cmd_get_id;
+ uint8_t cmd_readout_protect;
+ uint8_t cmd_readout_unprotect;
+ uint8_t cmd_update_finished;
+
+ int (*open)(const void *);
+ void (*close)(const void *);
+ uint8_t (*sync)(const void *);
+ uint8_t (*write_data)(const void *, uint8_t *, int);
+ uint8_t (*write_cmd)(const void *, uint8_t);
+ uint8_t (*read_data)(const void *, uint8_t *, int);
+ uint8_t (*read_ack)(const void *);
+
+ uint8_t *tx_buffer;
+ uint8_t *rx_buffer;
+};
+
+int nanohub_bl_open(struct nanohub_data *);
+uint8_t nanohub_bl_sync(struct nanohub_data *);
+void nanohub_bl_close(struct nanohub_data *);
+uint8_t nanohub_bl_download(struct nanohub_data *, uint32_t addr,
+ const uint8_t *data, size_t length);
+uint8_t nanohub_bl_erase_shared(struct nanohub_data *);
+uint8_t nanohub_bl_erase_shared_bl(struct nanohub_data *);
+uint8_t nanohub_bl_erase_sector(struct nanohub_data *, uint16_t);
+uint8_t nanohub_bl_erase_special(struct nanohub_data *, uint16_t);
+uint8_t nanohub_bl_read_memory(struct nanohub_data *, uint32_t, uint32_t,
+ uint8_t *);
+uint8_t nanohub_bl_write_memory(struct nanohub_data *, uint32_t, uint32_t,
+ const uint8_t *);
+uint8_t nanohub_bl_get_version(struct nanohub_data *, uint8_t *);
+uint8_t nanohub_bl_get_id(struct nanohub_data *, uint16_t *);
+uint8_t nanohub_bl_lock(struct nanohub_data *);
+uint8_t nanohub_bl_unlock(struct nanohub_data *);
+uint8_t nanohub_bl_update_finished(struct nanohub_data *);
+
+/*
+ * Bootloader commands
+ * _NS versions are no-stretch (only valid on I2C); they
+ * will return CMD_BUSY instead of stretching the clock.
+ */
+
+#define CMD_GET 0x00
+#define CMD_GET_VERSION 0x01
+#define CMD_GET_ID 0x02
+#define CMD_READ_MEMORY 0x11
+#define CMD_NACK 0x1F
+#define CMD_GO 0x21
+#define CMD_WRITE_MEMORY 0x31
+#define CMD_WRITE_MEMORY_NS 0x32
+#define CMD_ERASE 0x44
+#define CMD_ERASE_NS 0x45
+#define CMD_SOF 0x5A
+#define CMD_WRITE_PROTECT 0x63
+#define CMD_WRITE_PROTECT_NS 0x64
+#define CMD_WRITE_UNPROTECT 0x73
+#define CMD_WRITE_UNPROTECT_NS 0x74
+#define CMD_BUSY 0x76
+#define CMD_ACK 0x79
+#define CMD_READOUT_PROTECT 0x82
+#define CMD_READOUT_PROTECT_NS 0x83
+#define CMD_READOUT_UNPROTECT 0x92
+#define CMD_READOUT_UNPROTECT_NS 0x93
+#define CMD_SOF_ACK 0xA5
+#define CMD_GET_SIZES 0xEE
+#define CMD_UPDATE_FINISHED 0xEF
+
+#endif
diff --git a/drivers/staging/nanohub/comms.c b/drivers/staging/nanohub/comms.c
new file mode 100644
index 000000000000..82590d5b7390
--- /dev/null
+++ b/drivers/staging/nanohub/comms.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/gpio.h>
+
+#include "main.h"
+#include "comms.h"
+
+#define READ_ACK_TIMEOUT_MS 10
+#define READ_MSG_TIMEOUT_MS 70
+
+#define RESEND_SHORT_DELAY_US 500 /* 500us - 1ms */
+#define RESEND_LONG_DELAY_US 100000 /* 100ms - 200ms */
+
+static const uint32_t crc_table[] = {
+ 0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9,
+ 0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005,
+ 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
+ 0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD
+};
+
+static uint32_t crc32_word(uint32_t crc, uint32_t data, int cnt)
+{
+ int i;
+ crc = crc ^ data;
+
+ for (i = 0; i < cnt; i++)
+ crc = (crc << 4) ^ crc_table[crc >> 28];
+
+ return crc;
+}
+
+uint32_t crc32(const uint8_t *buffer, int length, uint32_t crc)
+{
+ uint32_t *data = (uint32_t *)buffer;
+ uint32_t word;
+ int i;
+
+ /* word by word crc32 */
+ for (i = 0; i < (length >> 2); i++)
+ crc = crc32_word(crc, data[i], 8);
+
+ /* zero pad last word if required */
+ if (length & 0x3) {
+ for (i *= 4, word = 0; i < length; i++)
+ word |= buffer[i] << ((i & 0x3) * 8);
+ crc = crc32_word(crc, word, 8);
+ }
+
+ return crc;
+}
+
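
The table above implements the STM32-style CRC (polynomial 0x04C11DB7, processed a nibble at a time, seeded with ~0). For reference, this is how the firmware header later in this file seals an image; nanohub_image_crc is an invented wrapper for illustration:

static inline uint32_t nanohub_image_crc(const uint8_t *image, int length)
{
        /* same expression used when building struct firmware_header */
        return ~crc32(image, length, ~0);
}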
+static inline size_t pad(size_t length)
+{
+ return (length + 3) & ~3;
+}
+
+static inline size_t tot_len(size_t length)
+{
+ /* [TYPE:1] [LENGTH:3] [DATA] [PAD:0-3] [CRC:4] */
+ return sizeof(uint32_t) + pad(length) + sizeof(uint32_t);
+}
+
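
A quick worked example of the framing arithmetic above (framing_example is illustrative only): a 5-byte payload pads up to 8 bytes, so the framed tail is 4 + 8 + 4 = 16 bytes.

static inline void framing_example(void)
{
        size_t padded = pad(5);         /* (5 + 3) & ~3 == 8 */
        size_t framed = tot_len(5);     /* 4 + 8 + 4 == 16   */

        (void)padded;
        (void)framed;
}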
+static struct nanohub_packet_pad *packet_alloc(int flags)
+{
+ int len =
+ sizeof(struct nanohub_packet_pad) + MAX_UINT8 +
+ sizeof(struct nanohub_packet_crc);
+ uint8_t *packet = kmalloc(len, flags);
+
+ if (packet)
+ memset(packet, 0xFF, len);
+ return (struct nanohub_packet_pad *)packet;
+}
+
+static int packet_create(struct nanohub_packet *packet, uint32_t seq,
+ uint32_t reason, uint8_t len, const uint8_t *data,
+ bool user)
+{
+ struct nanohub_packet_crc crc;
+ int ret = sizeof(struct nanohub_packet) + len +
+ sizeof(struct nanohub_packet_crc);
+
+ if (packet) {
+ packet->sync = COMMS_SYNC;
+ packet->seq = seq;
+ packet->reason = reason;
+ packet->len = len;
+ if (len > 0) {
+ if (user) {
+ if (copy_from_user(packet->data, data, len) !=
+ 0)
+ ret = ERROR_NACK;
+ } else {
+ memcpy(packet->data, data, len);
+ }
+ }
+ crc.crc =
+ crc32((uint8_t *) packet,
+ sizeof(struct nanohub_packet) + len, ~0);
+ memcpy(&packet->data[len], &crc.crc,
+ sizeof(struct nanohub_packet_crc));
+ } else {
+ ret = ERROR_NACK;
+ }
+
+ return ret;
+}
+
+static int packet_verify(struct nanohub_packet *packet)
+{
+ struct nanohub_packet_crc crc;
+ int cmp;
+
+ crc.crc =
+ crc32((uint8_t *) packet,
+ sizeof(struct nanohub_packet) + packet->len, ~0);
+
+ cmp =
+ memcmp(&crc.crc, &packet->data[packet->len],
+ sizeof(struct nanohub_packet_crc));
+
+ if (cmp != 0) {
+ uint8_t *ptr = (uint8_t *)packet;
+ pr_debug("nanohub: gen crc: %08x, got crc: %08x\n", crc.crc,
+ *(uint32_t *)&packet->data[packet->len]);
+ pr_debug(
+ "nanohub: %02x [%02x %02x %02x %02x] [%02x %02x %02x %02x] [%02x] [%02x %02x %02x %02x\n",
+ ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
+ ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12],
+ ptr[13]);
+ }
+
+ return cmp;
+}
+
+static void packet_free(struct nanohub_packet_pad *packet)
+{
+ kfree(packet);
+}
+
+static int read_ack(struct nanohub_data *data, struct nanohub_packet *response,
+ int timeout)
+{
+ int ret, i;
+ const int max_size = sizeof(struct nanohub_packet) + MAX_UINT8 +
+ sizeof(struct nanohub_packet_crc);
+ unsigned long end = jiffies + msecs_to_jiffies(READ_ACK_TIMEOUT_MS);
+
+ for (i = 0; time_before_eq(jiffies, end); i++) {
+ ret =
+ data->comms.read(data, (uint8_t *) response, max_size,
+ timeout);
+
+ if (ret == 0) {
+ pr_debug("nanohub: read_ack: %d: empty packet\n", i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret < sizeof(struct nanohub_packet)) {
+ pr_debug("nanohub: read_ack: %d: too small\n", i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret <
+ sizeof(struct nanohub_packet) + response->len +
+ sizeof(struct nanohub_packet_crc)) {
+ pr_debug("nanohub: read_ack: %d: too small length\n",
+ i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret !=
+ sizeof(struct nanohub_packet) + response->len +
+ sizeof(struct nanohub_packet_crc)) {
+ pr_debug("nanohub: read_ack: %d: wrong length\n", i);
+ ret = ERROR_NACK;
+ break;
+ } else if (packet_verify(response) != 0) {
+ pr_debug("nanohub: read_ack: %d: invalid crc\n", i);
+ ret = ERROR_NACK;
+ break;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int read_msg(struct nanohub_data *data, struct nanohub_packet *response,
+ int timeout)
+{
+ int ret, i;
+ const int max_size = sizeof(struct nanohub_packet) + MAX_UINT8 +
+ sizeof(struct nanohub_packet_crc);
+ unsigned long end = jiffies + msecs_to_jiffies(READ_MSG_TIMEOUT_MS);
+
+ for (i = 0; time_before_eq(jiffies, end); i++) {
+ ret =
+ data->comms.read(data, (uint8_t *) response, max_size,
+ timeout);
+
+ if (ret == 0) {
+ pr_debug("nanohub: read_msg: %d: empty packet\n", i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret < sizeof(struct nanohub_packet)) {
+ pr_debug("nanohub: read_msg: %d: too small\n", i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret <
+ sizeof(struct nanohub_packet) + response->len +
+ sizeof(struct nanohub_packet_crc)) {
+ pr_debug("nanohub: read_msg: %d: too small length\n",
+ i);
+ ret = ERROR_NACK;
+ continue;
+ } else if (ret !=
+ sizeof(struct nanohub_packet) + response->len +
+ sizeof(struct nanohub_packet_crc)) {
+ pr_debug("nanohub: read_msg: %d: wrong length\n", i);
+ ret = ERROR_NACK;
+ break;
+ } else if (packet_verify(response) != 0) {
+ pr_debug("nanohub: read_msg: %d: invalid crc\n", i);
+ ret = ERROR_NACK;
+ break;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int get_reply(struct nanohub_data *data, struct nanohub_packet *response,
+ uint32_t seq)
+{
+ int ret;
+
+ ret = read_ack(data, response, data->comms.timeout_ack);
+
+ if (ret >= 0 && response->seq == seq) {
+ if (response->reason == CMD_COMMS_ACK) {
+ if (response->len == sizeof(data->interrupts))
+ memcpy(data->interrupts, response->data,
+ response->len);
+ ret =
+ read_msg(data, response, data->comms.timeout_reply);
+ if (ret < 0)
+ ret = ERROR_NACK;
+ } else {
+ int i;
+ uint8_t *b = (uint8_t *) response;
+ for (i = 0; i < ret; i += 25)
+ pr_debug(
+ "nanohub: %d: %d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ret, i, b[i], b[i + 1], b[i + 2], b[i + 3],
+ b[i + 4], b[i + 5], b[i + 6], b[i + 7],
+ b[i + 8], b[i + 9], b[i + 10], b[i + 11],
+ b[i + 12], b[i + 13], b[i + 14], b[i + 15],
+ b[i + 16], b[i + 17], b[i + 18], b[i + 19],
+ b[i + 20], b[i + 21], b[i + 22], b[i + 23],
+ b[i + 24]);
+ if (response->reason == CMD_COMMS_NACK)
+ ret = ERROR_NACK;
+ else if (response->reason == CMD_COMMS_BUSY)
+ ret = ERROR_BUSY;
+ }
+
+ if (response->seq != seq)
+ ret = ERROR_NACK;
+ } else {
+ if (ret >= 0) {
+ int i;
+ uint8_t *b = (uint8_t *) response;
+ for (i = 0; i < ret; i += 25)
+ pr_debug(
+ "nanohub: %d: %d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ret, i, b[i], b[i + 1], b[i + 2], b[i + 3],
+ b[i + 4], b[i + 5], b[i + 6], b[i + 7],
+ b[i + 8], b[i + 9], b[i + 10], b[i + 11],
+ b[i + 12], b[i + 13], b[i + 14], b[i + 15],
+ b[i + 16], b[i + 17], b[i + 18], b[i + 19],
+ b[i + 20], b[i + 21], b[i + 22], b[i + 23],
+ b[i + 24]);
+ }
+ ret = ERROR_NACK;
+ }
+
+ return ret;
+}
+
+static int nanohub_comms_tx_rx(struct nanohub_data *data,
+ struct nanohub_packet_pad *pad, int packet_size,
+ uint32_t seq, uint8_t *rx, size_t rx_len)
+{
+ int ret;
+
+ ret = data->comms.write(data, (uint8_t *)&pad->packet, packet_size,
+ data->comms.timeout_write);
+
+ if (ret == packet_size) {
+ ret = get_reply(data, &pad->packet, seq);
+
+ if (ret >= 0) {
+ if (pad->packet.len > 0) {
+ if (pad->packet.len > rx_len) {
+ memcpy(rx, pad->packet.data, rx_len);
+ ret = rx_len;
+ } else {
+ memcpy(rx, pad->packet.data,
+ pad->packet.len);
+ ret = pad->packet.len;
+ }
+ } else {
+ ret = 0;
+ }
+ }
+ } else {
+ ret = ERROR_NACK;
+ }
+
+ return ret;
+}
+
+int nanohub_comms_rx_retrans_boottime(struct nanohub_data *data, uint32_t cmd,
+ uint8_t *rx, size_t rx_len,
+ int retrans_cnt, int retrans_delay)
+{
+ int packet_size = 0;
+ struct nanohub_packet_pad *pad = packet_alloc(GFP_KERNEL);
+ int delay = 0;
+ int ret;
+ uint32_t seq;
+ struct timespec ts;
+ s64 boottime;
+
+ if (pad == NULL)
+ return ERROR_NACK;
+
+ seq = data->comms.seq++;
+
+ do {
+ data->comms.open(data);
+ get_monotonic_boottime(&ts);
+ boottime = timespec_to_ns(&ts);
+ packet_size =
+ packet_create(&pad->packet, seq, cmd, sizeof(boottime),
+ (uint8_t *)&boottime, false);
+
+ ret =
+ nanohub_comms_tx_rx(data, pad, packet_size, seq, rx,
+ rx_len);
+
+ if (nanohub_wakeup_eom(data,
+ (ret == ERROR_BUSY) ||
+ (ret == ERROR_NACK && retrans_cnt >= 0)))
+ ret = -EFAULT;
+
+ data->comms.close(data);
+
+ if (ret == ERROR_NACK) {
+ retrans_cnt--;
+ delay += retrans_delay;
+ if (retrans_cnt >= 0)
+ udelay(retrans_delay);
+ } else if (ret == ERROR_BUSY) {
+ usleep_range(RESEND_LONG_DELAY_US,
+ RESEND_LONG_DELAY_US * 2);
+ }
+ } while ((ret == ERROR_BUSY)
+ || (ret == ERROR_NACK && retrans_cnt >= 0));
+
+ packet_free(pad);
+
+ return ret;
+}
+
+int nanohub_comms_tx_rx_retrans(struct nanohub_data *data, uint32_t cmd,
+ const uint8_t *tx, uint8_t tx_len,
+ uint8_t *rx, size_t rx_len, bool user,
+ int retrans_cnt, int retrans_delay)
+{
+ int packet_size = 0;
+ struct nanohub_packet_pad *pad = packet_alloc(GFP_KERNEL);
+ int delay = 0;
+ int ret;
+ uint32_t seq;
+
+ if (pad == NULL)
+ return ERROR_NACK;
+
+ seq = data->comms.seq++;
+
+ do {
+ packet_size =
+ packet_create(&pad->packet, seq, cmd, tx_len, tx, user);
+
+ data->comms.open(data);
+ ret =
+ nanohub_comms_tx_rx(data, pad, packet_size, seq, rx,
+ rx_len);
+
+ if (nanohub_wakeup_eom(data,
+ (ret == ERROR_BUSY) ||
+ (ret == ERROR_NACK && retrans_cnt >= 0)))
+ ret = -EFAULT;
+
+ data->comms.close(data);
+
+ if (ret == ERROR_NACK) {
+ retrans_cnt--;
+ delay += retrans_delay;
+ if (retrans_cnt >= 0)
+ udelay(retrans_delay);
+ } else if (ret == ERROR_BUSY) {
+ usleep_range(RESEND_LONG_DELAY_US,
+ RESEND_LONG_DELAY_US * 2);
+ }
+ } while ((ret == ERROR_BUSY)
+ || (ret == ERROR_NACK && retrans_cnt >= 0));
+
+ packet_free(pad);
+
+ return ret;
+}
+
+struct firmware_header {
+ uint32_t size;
+ uint32_t crc;
+ uint8_t type;
+} __packed;
+
+struct firmware_chunk {
+ uint32_t offset;
+ uint8_t data[128];
+} __packed;
+
+static int nanohub_comms_download(struct nanohub_data *data,
+ const uint8_t *image, size_t length,
+ uint8_t type)
+{
+ uint8_t accepted;
+ struct firmware_header header;
+ struct firmware_chunk chunk;
+ int max_chunk_size = sizeof(chunk.data);
+ int chunk_size;
+ uint32_t offset = 0;
+ int ret;
+ uint8_t chunk_reply, upload_reply = 0, last_reply = 0;
+ uint32_t clear_interrupts[8] = { 0x00000008 };
+ uint32_t delay;
+
+ header.type = type;
+ header.size = cpu_to_le32(length);
+ header.crc = cpu_to_le32(~crc32(image, length, ~0));
+
+ if (request_wakeup(data))
+ return -ERESTARTSYS;
+ ret = nanohub_comms_tx_rx_retrans(data, CMD_COMMS_START_KERNEL_UPLOAD,
+ (const uint8_t *)&header,
+ sizeof(header), &accepted,
+ sizeof(accepted), false, 10, 10);
+ release_wakeup(data);
+
+ if (ret == 1 && accepted == 1) {
+ do {
+ if (request_wakeup(data))
+ continue;
+
+ delay = 0;
+ chunk.offset = cpu_to_le32(offset);
+ if (offset + max_chunk_size > length)
+ chunk_size = length - offset;
+ else
+ chunk_size = max_chunk_size;
+ memcpy(chunk.data, image + offset, chunk_size);
+
+ ret =
+ nanohub_comms_tx_rx_retrans(data,
+ CMD_COMMS_KERNEL_CHUNK,
+ (const uint8_t *)&chunk,
+ sizeof(uint32_t) +
+ chunk_size,
+ &chunk_reply,
+ sizeof(chunk_reply),
+ false, 10, 10);
+
+ pr_debug("nanohub: ret=%d, chunk_reply=%d, offset=%d\n",
+ ret, chunk_reply, offset);
+ if (ret == sizeof(chunk_reply)) {
+ if (chunk_reply == CHUNK_REPLY_ACCEPTED) {
+ offset += chunk_size;
+ } else if (chunk_reply == CHUNK_REPLY_WAIT) {
+ ret = nanohub_wait_for_interrupt(data);
+ if (ret < 0) {
+ release_wakeup(data);
+ continue;
+ }
+ nanohub_comms_tx_rx_retrans(data,
+ CMD_COMMS_CLR_GET_INTR,
+ (uint8_t *)clear_interrupts,
+ sizeof(clear_interrupts),
+ (uint8_t *)data->interrupts,
+ sizeof(data->interrupts),
+ false, 10, 0);
+ } else if (chunk_reply == CHUNK_REPLY_RESEND) {
+ if (last_reply == CHUNK_REPLY_RESEND)
+ delay = RESEND_LONG_DELAY_US;
+ else
+ delay = RESEND_SHORT_DELAY_US;
+ } else if (chunk_reply == CHUNK_REPLY_RESTART)
+ offset = 0;
+ else if (chunk_reply == CHUNK_REPLY_CANCEL ||
+ chunk_reply ==
+ CHUNK_REPLY_CANCEL_NO_RETRY) {
+ release_wakeup(data);
+ break;
+ }
+ last_reply = chunk_reply;
+ } else if (ret <= 0) {
+ release_wakeup(data);
+ break;
+ }
+ release_wakeup(data);
+ if (delay > 0)
+ usleep_range(delay, delay * 2);
+ } while (offset < length);
+ }
+
+ do {
+ if (upload_reply == UPLOAD_REPLY_PROCESSING)
+ usleep_range(RESEND_LONG_DELAY_US,
+ RESEND_LONG_DELAY_US * 2);
+
+ if (request_wakeup(data)) {
+ ret = sizeof(upload_reply);
+ upload_reply = UPLOAD_REPLY_PROCESSING;
+ continue;
+ }
+ ret = nanohub_comms_tx_rx_retrans(data,
+ CMD_COMMS_FINISH_KERNEL_UPLOAD,
+ NULL, 0,
+ &upload_reply, sizeof(upload_reply),
+ false, 10, 10);
+ release_wakeup(data);
+ } while (ret == sizeof(upload_reply) &&
+ upload_reply == UPLOAD_REPLY_PROCESSING);
+
+ pr_info("nanohub: nanohub_comms_download: ret=%d, upload_reply=%d\n",
+ ret, upload_reply);
+
+ return 0;
+}
+
+int nanohub_comms_kernel_download(struct nanohub_data *data,
+ const uint8_t *image, size_t length)
+{
+ return nanohub_comms_download(data, image, length,
+ COMMS_FLASH_KERNEL_ID);
+}
+
+int nanohub_comms_app_download(struct nanohub_data *data, const uint8_t *image,
+ size_t length)
+{
+ return nanohub_comms_download(data, image, length, COMMS_FLASH_APP_ID);
+}
diff --git a/drivers/staging/nanohub/comms.h b/drivers/staging/nanohub/comms.h
new file mode 100644
index 000000000000..b22e55bbb8dd
--- /dev/null
+++ b/drivers/staging/nanohub/comms.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_COMMS_H
+#define _NANOHUB_COMMS_H
+
+struct __attribute__ ((__packed__)) nanohub_packet {
+ uint8_t sync;
+ uint32_t seq;
+ uint32_t reason;
+ uint8_t len;
+ uint8_t data[];
+};
+
+struct __attribute__ ((__packed__)) nanohub_packet_pad {
+ uint8_t pad[3];
+ struct nanohub_packet packet;
+};
+
+struct __attribute__ ((__packed__)) nanohub_packet_crc {
+ uint32_t crc;
+};
+
+struct nanohub_data;
+
+struct nanohub_comms {
+ uint32_t seq;
+ int timeout_write;
+ int timeout_ack;
+ int timeout_reply;
+ int (*open)(void *);
+ void (*close)(void *);
+ int (*write)(void *, uint8_t *, int, int);
+ int (*read)(void *, uint8_t *, int, int);
+
+ uint8_t *tx_buffer;
+ uint8_t *rx_buffer;
+};
+
+int nanohub_comms_kernel_download(struct nanohub_data *, const uint8_t *,
+ size_t);
+int nanohub_comms_app_download(struct nanohub_data *, const uint8_t *, size_t);
+int nanohub_comms_rx_retrans_boottime(struct nanohub_data *, uint32_t,
+ uint8_t *, size_t, int, int);
+int nanohub_comms_tx_rx_retrans(struct nanohub_data *, uint32_t,
+ const uint8_t *, uint8_t, uint8_t *, size_t,
+ bool, int, int);
+
+#define ERROR_NACK -1
+#define ERROR_BUSY -2
+
+#define MAX_UINT8 ((1 << (8*sizeof(uint8_t))) - 1)
+
+#define COMMS_SYNC 0x31
+#define COMMS_FLASH_KERNEL_ID 0x1
+#define COMMS_FLASH_EEDATA_ID 0x2
+#define COMMS_FLASH_APP_ID 0x4
+
+#define CMD_COMMS_ACK 0x00000000
+#define CMD_COMMS_NACK 0x00000001
+#define CMD_COMMS_BUSY 0x00000002
+
+#define CMD_COMMS_GET_OS_HW_VERSIONS 0x00001000
+#define CMD_COMMS_GET_APP_VERSIONS 0x00001001
+#define CMD_COMMS_QUERY_APP_INFO 0x00001002
+
+#define CMD_COMMS_START_KERNEL_UPLOAD 0x00001040
+#define CMD_COMMS_KERNEL_CHUNK 0x00001041
+#define CMD_COMMS_FINISH_KERNEL_UPLOAD 0x00001042
+
+#define CMD_COMMS_START_APP_UPLOAD 0x00001050
+#define CMD_COMMS_APP_CHUNK 0x00001051
+
+#define CMD_COMMS_CLR_GET_INTR 0x00001080
+#define CMD_COMMS_MASK_INTR 0x00001081
+#define CMD_COMMS_UNMASK_INTR 0x00001082
+#define CMD_COMMS_READ 0x00001090
+#define CMD_COMMS_WRITE 0x00001091
+
+#define CHUNK_REPLY_ACCEPTED 0
+#define CHUNK_REPLY_WAIT 1
+#define CHUNK_REPLY_RESEND 2
+#define CHUNK_REPLY_RESTART 3
+#define CHUNK_REPLY_CANCEL 4
+#define CHUNK_REPLY_CANCEL_NO_RETRY 5
+
+#define UPLOAD_REPLY_SUCCESS 0
+#define UPLOAD_REPLY_PROCESSING 1
+#define UPLOAD_REPLY_WAITING_FOR_DATA 2
+#define UPLOAD_REPLY_APP_SEC_KEY_NOT_FOUND 3
+#define UPLOAD_REPLY_APP_SEC_HEADER_ERROR 4
+#define UPLOAD_REPLY_APP_SEC_TOO_MUCH_DATA 5
+#define UPLOAD_REPLY_APP_SEC_TOO_LITTLE_DATA 6
+#define UPLOAD_REPLY_APP_SEC_SIG_VERIFY_FAIL 7
+#define UPLOAD_REPLY_APP_SEC_SIG_DECODE_FAIL 8
+#define UPLOAD_REPLY_APP_SEC_SIG_ROOT_UNKNOWN 9
+#define UPLOAD_REPLY_APP_SEC_MEMORY_ERROR 10
+#define UPLOAD_REPLY_APP_SEC_INVALID_DATA 11
+#define UPLOAD_REPLY_APP_SEC_BAD 12
+
+static inline int nanohub_comms_write(struct nanohub_data *data,
+ const uint8_t *buffer, size_t buffer_len)
+{
+ uint8_t ret;
+ if (nanohub_comms_tx_rx_retrans
+ (data, CMD_COMMS_WRITE, buffer, buffer_len, &ret, sizeof(ret), true,
+ 10, 10) == sizeof(ret)) {
+ if (ret)
+ return buffer_len;
+ else
+ return 0;
+ } else {
+ return ERROR_NACK;
+ }
+}
+
+#endif
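
A hypothetical kernel-side caller of the retransmitting transfer helper, using one of the command codes above (nanohub_query_os_hw_versions is an invented name; the wakeup request/release handshake that real callers perform around this is omitted):

static int nanohub_query_os_hw_versions(struct nanohub_data *data,
                                        uint8_t *buf, size_t buf_len)
{
        /* kernel-space rx buffer (user == false), 10 retries, 10us apart */
        return nanohub_comms_tx_rx_retrans(data, CMD_COMMS_GET_OS_HW_VERSIONS,
                                           NULL, 0, buf, buf_len,
                                           false, 10, 10);
}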
diff --git a/drivers/staging/nanohub/main.c b/drivers/staging/nanohub/main.c
new file mode 100644
index 000000000000..0268f6f88cd8
--- /dev/null
+++ b/drivers/staging/nanohub/main.c
@@ -0,0 +1,1793 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/time.h>
+#include <linux/platform_data/nanohub.h>
+
+#include "main.h"
+#include "comms.h"
+#include "bl.h"
+#include "spi.h"
+
+#define READ_QUEUE_DEPTH 10
+#define APP_FROM_HOST_EVENTID 0x000000F8
+#define FIRST_SENSOR_EVENTID 0x00000200
+#define LAST_SENSOR_EVENTID 0x000002FF
+#define APP_TO_HOST_EVENTID 0x00000401
+#define OS_LOG_EVENTID 0x3B474F4C
+#define WAKEUP_INTERRUPT 1
+#define WAKEUP_TIMEOUT_MS 1000
+#define SUSPEND_TIMEOUT_MS 100
+#define KTHREAD_ERR_TIME_NS (60LL * NSEC_PER_SEC)
+#define KTHREAD_ERR_CNT 70
+#define KTHREAD_WARN_CNT 10
+#define WAKEUP_ERR_TIME_NS (60LL * NSEC_PER_SEC)
+#define WAKEUP_ERR_CNT 4
+
+/**
+ * struct gpio_config - this is a binding between platform data and driver data
+ * @label: for diagnostics
+ * @flags: to pass to gpio_request_one()
+ * @options: one or more of GPIO_OPT_* flags, below
+ * @pdata_off: offset of u32 field in platform data with gpio #
+ * @data_off: offset of int field in driver data with irq # (optional)
+ */
+struct gpio_config {
+ const char *label;
+ u16 flags;
+ u16 options;
+ u16 pdata_off;
+ u16 data_off;
+};
+
+#define GPIO_OPT_HAS_IRQ 0x0001
+#define GPIO_OPT_OPTIONAL 0x8000
+
+#define PLAT_GPIO_DEF(name, _flags) \
+ .pdata_off = offsetof(struct nanohub_platform_data, name ## _gpio), \
+ .label = "nanohub_" #name, \
+ .flags = _flags \
+
+#define PLAT_GPIO_DEF_IRQ(name, _flags, _opts) \
+ PLAT_GPIO_DEF(name, _flags), \
+ .data_off = offsetof(struct nanohub_data, name), \
+ .options = GPIO_OPT_HAS_IRQ | (_opts) \
+
+static int nanohub_open(struct inode *, struct file *);
+static ssize_t nanohub_read(struct file *, char *, size_t, loff_t *);
+static ssize_t nanohub_write(struct file *, const char *, size_t, loff_t *);
+static unsigned int nanohub_poll(struct file *, poll_table *);
+static int nanohub_release(struct inode *, struct file *);
+static int nanohub_hw_reset(struct nanohub_data *data);
+
+static struct class *sensor_class;
+static int major;
+
+static const struct gpio_config gconf[] = {
+ { PLAT_GPIO_DEF(nreset, GPIOF_OUT_INIT_HIGH) },
+ { PLAT_GPIO_DEF(wakeup, GPIOF_OUT_INIT_HIGH) },
+ { PLAT_GPIO_DEF(boot0, GPIOF_OUT_INIT_LOW) },
+ { PLAT_GPIO_DEF_IRQ(irq1, GPIOF_DIR_IN, 0) },
+ { PLAT_GPIO_DEF_IRQ(irq2, GPIOF_DIR_IN, GPIO_OPT_OPTIONAL) },
+};
+
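
For readability, here is roughly what the irq1 entry in the table above expands to once PLAT_GPIO_DEF_IRQ is applied (irq1_entry_expanded is a sketch, not part of the patch):

static const struct gpio_config irq1_entry_expanded = {
        .pdata_off = offsetof(struct nanohub_platform_data, irq1_gpio),
        .label     = "nanohub_irq1",
        .flags     = GPIOF_DIR_IN,
        .data_off  = offsetof(struct nanohub_data, irq1),
        .options   = GPIO_OPT_HAS_IRQ | 0,
};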
+static const struct iio_info nanohub_iio_info = {
+ .driver_module = THIS_MODULE,
+};
+
+static const struct file_operations nanohub_fileops = {
+ .owner = THIS_MODULE,
+ .open = nanohub_open,
+ .read = nanohub_read,
+ .write = nanohub_write,
+ .poll = nanohub_poll,
+ .release = nanohub_release,
+};
+
+enum {
+ ST_IDLE,
+ ST_ERROR,
+ ST_RUNNING
+};
+
+static inline bool gpio_is_optional(const struct gpio_config *_cfg)
+{
+ return _cfg->options & GPIO_OPT_OPTIONAL;
+}
+
+static inline bool gpio_has_irq(const struct gpio_config *_cfg)
+{
+ return _cfg->options & GPIO_OPT_HAS_IRQ;
+}
+
+static inline bool nanohub_has_priority_lock_locked(struct nanohub_data *data)
+{
+ return atomic_read(&data->wakeup_lock_cnt) >
+ atomic_read(&data->wakeup_cnt);
+}
+
+static inline void nanohub_notify_thread(struct nanohub_data *data)
+{
+ atomic_set(&data->kthread_run, 1);
+ /* wake_up implementation works as memory barrier */
+ wake_up_interruptible_sync(&data->kthread_wait);
+}
+
+static inline void nanohub_io_init(struct nanohub_io *io,
+ struct nanohub_data *data,
+ struct device *dev)
+{
+ init_waitqueue_head(&io->buf_wait);
+ INIT_LIST_HEAD(&io->buf_list);
+ io->data = data;
+ io->dev = dev;
+}
+
+static inline bool nanohub_io_has_buf(struct nanohub_io *io)
+{
+ return !list_empty(&io->buf_list);
+}
+
+static struct nanohub_buf *nanohub_io_get_buf(struct nanohub_io *io,
+ bool wait)
+{
+ struct nanohub_buf *buf = NULL;
+ int ret;
+
+ spin_lock(&io->buf_wait.lock);
+ if (wait) {
+ ret = wait_event_interruptible_locked(io->buf_wait,
+ nanohub_io_has_buf(io));
+ if (ret < 0) {
+ spin_unlock(&io->buf_wait.lock);
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (nanohub_io_has_buf(io)) {
+ buf = list_first_entry(&io->buf_list, struct nanohub_buf, list);
+ list_del(&buf->list);
+ }
+ spin_unlock(&io->buf_wait.lock);
+
+ return buf;
+}
+
+static void nanohub_io_put_buf(struct nanohub_io *io,
+ struct nanohub_buf *buf)
+{
+ bool was_empty;
+
+ spin_lock(&io->buf_wait.lock);
+ was_empty = !nanohub_io_has_buf(io);
+ list_add_tail(&buf->list, &io->buf_list);
+ spin_unlock(&io->buf_wait.lock);
+
+ if (was_empty) {
+ if (&io->data->free_pool == io)
+ nanohub_notify_thread(io->data);
+ else
+ wake_up_interruptible(&io->buf_wait);
+ }
+}
+
+static inline int plat_gpio_get(struct nanohub_data *data,
+ const struct gpio_config *_cfg)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ return *(u32 *)(((char *)pdata) + (_cfg)->pdata_off);
+}
+
+static inline void nanohub_set_irq_data(struct nanohub_data *data,
+ const struct gpio_config *_cfg, int val)
+{
+ int *data_addr = ((int *)(((char *)data) + _cfg->data_off));
+
+ if ((void *)data_addr > (void *)data &&
+ (void *)data_addr < (void *)(data + 1))
+ *data_addr = val;
+ else
+ WARN(1, "No data binding defined for %s", _cfg->label);
+}
+
+static inline void mcu_wakeup_gpio_set_value(struct nanohub_data *data,
+ int val)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ gpio_set_value(pdata->wakeup_gpio, val);
+}
+
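+/*
+ * Wakeup line refcounting: wakeup_lock_cnt counts every holder, wakeup_cnt
+ * only the normal (non-priority) ones.  The wakeup GPIO is driven low
+ * (requesting the MCU to stay awake) while at least one normal holder
+ * exists and no priority lock is pending, and raised again when the last
+ * one drops it.
+ */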
+static inline void mcu_wakeup_gpio_get_locked(struct nanohub_data *data,
+ int priority_lock)
+{
+ atomic_inc(&data->wakeup_lock_cnt);
+ if (!priority_lock && atomic_inc_return(&data->wakeup_cnt) == 1 &&
+ !nanohub_has_priority_lock_locked(data))
+ mcu_wakeup_gpio_set_value(data, 0);
+}
+
+static inline bool mcu_wakeup_gpio_put_locked(struct nanohub_data *data,
+ int priority_lock)
+{
+ bool gpio_done = priority_lock ?
+ atomic_read(&data->wakeup_cnt) == 0 :
+ atomic_dec_and_test(&data->wakeup_cnt);
+ bool done = atomic_dec_and_test(&data->wakeup_lock_cnt);
+
+ if (!nanohub_has_priority_lock_locked(data))
+ mcu_wakeup_gpio_set_value(data, gpio_done ? 1 : 0);
+
+ return done;
+}
+
+static inline bool mcu_wakeup_gpio_is_locked(struct nanohub_data *data)
+{
+ return atomic_read(&data->wakeup_lock_cnt) != 0;
+}
+
+static inline void nanohub_handle_irq1(struct nanohub_data *data)
+{
+ bool locked;
+
+ spin_lock(&data->wakeup_wait.lock);
+ locked = mcu_wakeup_gpio_is_locked(data);
+ spin_unlock(&data->wakeup_wait.lock);
+ if (!locked)
+ nanohub_notify_thread(data);
+ else
+ wake_up_interruptible_sync(&data->wakeup_wait);
+}
+
+static inline void nanohub_handle_irq2(struct nanohub_data *data)
+{
+ nanohub_notify_thread(data);
+}
+
+static inline bool mcu_wakeup_try_lock(struct nanohub_data *data, int key)
+{
+ /* implementation contains memory barrier */
+ return atomic_cmpxchg(&data->wakeup_acquired, 0, key) == 0;
+}
+
+static inline void mcu_wakeup_unlock(struct nanohub_data *data, int key)
+{
+ WARN(atomic_cmpxchg(&data->wakeup_acquired, key, 0) != key,
+ "%s: failed to unlock with key %d; current state: %d",
+ __func__, key, atomic_read(&data->wakeup_acquired));
+}
+
+static inline void nanohub_set_state(struct nanohub_data *data, int state)
+{
+ atomic_set(&data->thread_state, state);
+ smp_mb__after_atomic(); /* updated thread state is now visible */
+}
+
+static inline int nanohub_get_state(struct nanohub_data *data)
+{
+ smp_mb__before_atomic(); /* wait for all updates to finish */
+ return atomic_read(&data->thread_state);
+}
+
+static inline void nanohub_clear_err_cnt(struct nanohub_data *data)
+{
+ data->kthread_err_cnt = data->wakeup_err_cnt = 0;
+}
+
+/* the following fragment is based on wait_event_* code from wait.h */
+#define wait_event_interruptible_timeout_locked(q, cond, tmo) \
+({ \
+ long __ret = (tmo); \
+ DEFINE_WAIT(__wait); \
+ if (!(cond)) { \
+ for (;;) { \
+ __wait.flags &= ~WQ_FLAG_EXCLUSIVE; \
+ if (list_empty(&__wait.task_list)) \
+ __add_wait_queue_tail(&(q), &__wait); \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if ((cond)) \
+ break; \
+ if (signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock(&(q).lock); \
+ __ret = schedule_timeout(__ret); \
+ spin_lock(&(q).lock); \
+ if (!__ret) { \
+ if ((cond)) \
+ __ret = 1; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ if (!list_empty(&__wait.task_list)) \
+ list_del_init(&__wait.task_list); \
+ else if (__ret == -ERESTARTSYS && \
+			 /* reimplementation of wait_abort_exclusive() */ \
+ waitqueue_active(&(q))) \
+ __wake_up_locked_key(&(q), TASK_INTERRUPTIBLE, \
+ NULL); \
+ } else { \
+ __ret = 1; \
+ } \
+ __ret; \
+}) \
+
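+/*
+ * Returns 0 once the wakeup has been acquired (priority locks do not wait
+ * for IRQ1), -ETIME on timeout and -ERESTARTSYS on a signal; persistent
+ * timeouts eventually trigger a hard reset of the hub.
+ */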
+int request_wakeup_ex(struct nanohub_data *data, long timeout_ms,
+ int key, int lock_mode)
+{
+ long timeout;
+ bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
+ struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+ int ret;
+ ktime_t ktime_delta;
+ ktime_t wakeup_ktime;
+
+ spin_lock(&data->wakeup_wait.lock);
+ mcu_wakeup_gpio_get_locked(data, priority_lock);
+ timeout = (timeout_ms != MAX_SCHEDULE_TIMEOUT) ?
+ msecs_to_jiffies(timeout_ms) :
+ MAX_SCHEDULE_TIMEOUT;
+
+ if (!priority_lock && !data->wakeup_err_cnt)
+ wakeup_ktime = ktime_get_boottime();
+ timeout = wait_event_interruptible_timeout_locked(
+ data->wakeup_wait,
+ ((priority_lock || nanohub_irq1_fired(data)) &&
+ mcu_wakeup_try_lock(data, key)),
+ timeout
+ );
+
+ if (timeout <= 0) {
+ if (!timeout && !priority_lock) {
+ if (!data->wakeup_err_cnt)
+ data->wakeup_err_ktime = wakeup_ktime;
+ ktime_delta = ktime_sub(ktime_get_boottime(),
+ data->wakeup_err_ktime);
+ data->wakeup_err_cnt++;
+ if (ktime_to_ns(ktime_delta) > WAKEUP_ERR_TIME_NS
+ && data->wakeup_err_cnt > WAKEUP_ERR_CNT) {
+ mcu_wakeup_gpio_put_locked(data, priority_lock);
+ spin_unlock(&data->wakeup_wait.lock);
+ dev_info(sensor_dev,
+ "wakeup: hard reset due to consistent error\n");
+ ret = nanohub_hw_reset(data);
+ if (ret) {
+ dev_info(sensor_dev,
+ "%s: failed to reset nanohub: ret=%d\n",
+ __func__, ret);
+ }
+ return -ETIME;
+ }
+ }
+ mcu_wakeup_gpio_put_locked(data, priority_lock);
+
+ if (timeout == 0)
+ timeout = -ETIME;
+ } else {
+ data->wakeup_err_cnt = 0;
+ timeout = 0;
+ }
+ spin_unlock(&data->wakeup_wait.lock);
+
+ return timeout;
+}
+
+void release_wakeup_ex(struct nanohub_data *data, int key, int lock_mode)
+{
+ bool done;
+ bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
+
+ spin_lock(&data->wakeup_wait.lock);
+ done = mcu_wakeup_gpio_put_locked(data, priority_lock);
+ mcu_wakeup_unlock(data, key);
+ spin_unlock(&data->wakeup_wait.lock);
+
+ if (!done)
+ wake_up_interruptible_sync(&data->wakeup_wait);
+ else if (nanohub_irq1_fired(data) || nanohub_irq2_fired(data))
+ nanohub_notify_thread(data);
+}
+
+int nanohub_wait_for_interrupt(struct nanohub_data *data)
+{
+ int ret = -EFAULT;
+
+ /* release the wakeup line, and wait for nanohub to send
+ * us an interrupt indicating the transaction completed.
+ */
+ spin_lock(&data->wakeup_wait.lock);
+ if (mcu_wakeup_gpio_is_locked(data)) {
+ mcu_wakeup_gpio_set_value(data, 1);
+ ret = wait_event_interruptible_locked(data->wakeup_wait,
+ nanohub_irq1_fired(data));
+ mcu_wakeup_gpio_set_value(data, 0);
+ }
+ spin_unlock(&data->wakeup_wait.lock);
+
+ return ret;
+}
+
+int nanohub_wakeup_eom(struct nanohub_data *data, bool repeat)
+{
+ int ret = -EFAULT;
+
+ spin_lock(&data->wakeup_wait.lock);
+ if (mcu_wakeup_gpio_is_locked(data)) {
+ mcu_wakeup_gpio_set_value(data, 1);
+ if (repeat)
+ mcu_wakeup_gpio_set_value(data, 0);
+ ret = 0;
+ }
+ spin_unlock(&data->wakeup_wait.lock);
+
+ return ret;
+}
+
+static void __nanohub_interrupt_cfg(struct nanohub_data *data,
+ u8 interrupt, bool mask)
+{
+ int ret;
+ uint8_t mask_ret;
+ int cnt = 10;
+ struct device *dev = data->io[ID_NANOHUB_SENSOR].dev;
+ int cmd = mask ? CMD_COMMS_MASK_INTR : CMD_COMMS_UNMASK_INTR;
+
+ do {
+ ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+ if (ret) {
+ dev_err(dev,
+ "%s: interrupt %d %smask failed: ret=%d\n",
+ __func__, interrupt, mask ? "" : "un", ret);
+ return;
+ }
+
+ ret =
+ nanohub_comms_tx_rx_retrans(data, cmd,
+ &interrupt, sizeof(interrupt),
+ &mask_ret, sizeof(mask_ret),
+ false, 10, 0);
+ release_wakeup(data);
+ dev_dbg(dev,
+ "%smasking interrupt %d, ret=%d, mask_ret=%d\n",
+ mask ? "" : "un",
+ interrupt, ret, mask_ret);
+ } while ((ret != 1 || mask_ret != 1) && --cnt > 0);
+}
+
+static inline void nanohub_mask_interrupt(struct nanohub_data *data,
+ u8 interrupt)
+{
+ __nanohub_interrupt_cfg(data, interrupt, true);
+}
+
+static inline void nanohub_unmask_interrupt(struct nanohub_data *data,
+ u8 interrupt)
+{
+ __nanohub_interrupt_cfg(data, interrupt, false);
+}
+
+static ssize_t nanohub_wakeup_query(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ nanohub_clear_err_cnt(data);
+ if (nanohub_irq1_fired(data) || nanohub_irq2_fired(data))
+ wake_up_interruptible(&data->wakeup_wait);
+
+ return scnprintf(buf, PAGE_SIZE, "WAKEUP: %d INT1: %d INT2: %d\n",
+ gpio_get_value(pdata->wakeup_gpio),
+ gpio_get_value(pdata->irq1_gpio),
+ data->irq2 ? gpio_get_value(pdata->irq2_gpio) : -1);
+}
+
+static ssize_t nanohub_app_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ struct {
+ uint64_t appId;
+ uint32_t appVer;
+ uint32_t appSize;
+ } __packed buffer;
+ uint32_t i = 0;
+ int ret;
+ ssize_t len = 0;
+
+ do {
+ if (request_wakeup(data))
+ return -ERESTARTSYS;
+
+ if (nanohub_comms_tx_rx_retrans
+ (data, CMD_COMMS_QUERY_APP_INFO, (uint8_t *)&i,
+ sizeof(i), (u8 *)&buffer, sizeof(buffer),
+ false, 10, 10) == sizeof(buffer)) {
+ ret =
+ scnprintf(buf + len, PAGE_SIZE - len,
+ "app: %d id: %016llx ver: %08x size: %08x\n",
+ i, buffer.appId, buffer.appVer,
+ buffer.appSize);
+ if (ret > 0) {
+ len += ret;
+ i++;
+ }
+ } else {
+ ret = -1;
+ }
+
+ release_wakeup(data);
+ } while (ret > 0);
+
+ return len;
+}
+
+static ssize_t nanohub_firmware_query(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ uint16_t buffer[6];
+
+ if (request_wakeup(data))
+ return -ERESTARTSYS;
+
+ if (nanohub_comms_tx_rx_retrans
+ (data, CMD_COMMS_GET_OS_HW_VERSIONS, NULL, 0, (uint8_t *)&buffer,
+ sizeof(buffer), false, 10, 10) == sizeof(buffer)) {
+ release_wakeup(data);
+ return scnprintf(buf, PAGE_SIZE,
+ "hw type: %04x hw ver: %04x bl ver: %04x os ver: %04x variant ver: %08x\n",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[5] << 16 | buffer[4]);
+ } else {
+ release_wakeup(data);
+ return 0;
+ }
+}
+
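+/*
+ * Take exclusive access to the hub for reset, flashing or suspend: quiesce
+ * IRQ2, grab the wakeup lock with KEY_WAKEUP_LOCK and, for the IO modes,
+ * open the bootloader channel.  IRQ1 stays enabled only in the
+ * suspend/resume mode.
+ */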
+static inline int nanohub_wakeup_lock(struct nanohub_data *data, int mode)
+{
+ int ret;
+
+ if (data->irq2)
+ disable_irq(data->irq2);
+ else
+ nanohub_mask_interrupt(data, 2);
+
+ ret = request_wakeup_ex(data,
+ mode == LOCK_MODE_SUSPEND_RESUME ?
+ SUSPEND_TIMEOUT_MS : WAKEUP_TIMEOUT_MS,
+ KEY_WAKEUP_LOCK, mode);
+ if (ret < 0) {
+ if (data->irq2)
+ enable_irq(data->irq2);
+ else
+ nanohub_unmask_interrupt(data, 2);
+ return ret;
+ }
+
+ if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
+ ret = nanohub_bl_open(data);
+ if (ret < 0) {
+ release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
+ return ret;
+ }
+ if (mode != LOCK_MODE_SUSPEND_RESUME)
+ disable_irq(data->irq1);
+
+ atomic_set(&data->lock_mode, mode);
+ mcu_wakeup_gpio_set_value(data, mode != LOCK_MODE_IO_BL);
+
+ return 0;
+}
+
+/* returns lock mode used to perform this lock */
+static inline int nanohub_wakeup_unlock(struct nanohub_data *data)
+{
+ int mode = atomic_read(&data->lock_mode);
+
+ atomic_set(&data->lock_mode, LOCK_MODE_NONE);
+ if (mode != LOCK_MODE_SUSPEND_RESUME)
+ enable_irq(data->irq1);
+ if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
+ nanohub_bl_close(data);
+ if (data->irq2)
+ enable_irq(data->irq2);
+ release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
+ if (!data->irq2)
+ nanohub_unmask_interrupt(data, 2);
+ nanohub_notify_thread(data);
+
+ return mode;
+}
+
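+/*
+ * Pulse nRESET with BOOT0 driven high (boot0 > 0) to enter the STM32
+ * bootloader or low (boot0 == 0) for a normal boot; a negative value also
+ * boots normally but skips the post-reset delay.
+ */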
+static void __nanohub_hw_reset(struct nanohub_data *data, int boot0)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ gpio_set_value(pdata->nreset_gpio, 0);
+ gpio_set_value(pdata->boot0_gpio, boot0 > 0);
+ usleep_range(30, 40);
+ gpio_set_value(pdata->nreset_gpio, 1);
+ if (boot0 > 0)
+ usleep_range(70000, 75000);
+ else if (!boot0)
+ usleep_range(750000, 800000);
+ nanohub_clear_err_cnt(data);
+}
+
+static int nanohub_hw_reset(struct nanohub_data *data)
+{
+ int ret;
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_RESET);
+
+ if (!ret) {
+ __nanohub_hw_reset(data, 0);
+ nanohub_wakeup_unlock(data);
+ }
+
+ return ret;
+}
+
+static ssize_t nanohub_try_hw_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ int ret;
+
+ ret = nanohub_hw_reset(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_erase_shared(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ uint8_t status = CMD_ACK;
+ int ret;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, 1);
+
+ status = nanohub_bl_erase_shared(data);
+ dev_info(dev, "nanohub_bl_erase_shared: status=%02x\n",
+ status);
+
+ __nanohub_hw_reset(data, 0);
+ nanohub_wakeup_unlock(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_erase_shared_bl(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ uint8_t status = CMD_ACK;
+ int ret;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO_BL);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, -1);
+
+ status = nanohub_bl_erase_shared_bl(data);
+ dev_info(dev, "%s: status=%02x\n", __func__, status);
+
+ __nanohub_hw_reset(data, 0);
+ nanohub_wakeup_unlock(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_bl(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ const struct nanohub_platform_data *pdata = data->pdata;
+ const struct firmware *fw_entry;
+ int ret;
+ uint8_t status = CMD_ACK;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, 1);
+
+ ret = request_firmware(&fw_entry, "nanohub.full.bin", dev);
+ if (ret) {
+ dev_err(dev, "%s: err=%d\n", __func__, ret);
+ } else {
+ status = nanohub_bl_download(data, pdata->bl_addr,
+ fw_entry->data, fw_entry->size);
+ dev_info(dev, "%s: status=%02x\n", __func__, status);
+ release_firmware(fw_entry);
+ }
+
+ __nanohub_hw_reset(data, 0);
+ nanohub_wakeup_unlock(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_kernel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ const struct firmware *fw_entry;
+ int ret;
+
+ ret = request_firmware(&fw_entry, "nanohub.update.bin", dev);
+	if (ret) {
+		dev_err(dev, "nanohub_download_kernel: err=%d\n", ret);
+		return -EIO;
+	}
+
+	ret = nanohub_comms_kernel_download(data, fw_entry->data,
+					    fw_entry->size);
+
+	release_firmware(fw_entry);
+
+	return count;
+}
+
+static ssize_t nanohub_download_kernel_bl(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ const struct firmware *fw_entry;
+ int ret;
+ uint8_t status = CMD_ACK;
+
+ ret = request_firmware(&fw_entry, "nanohub.kernel.signed", dev);
+ if (ret) {
+ dev_err(dev, "%s: err=%d\n", __func__, ret);
+ } else {
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO_BL);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, -1);
+
+ status = nanohub_bl_erase_shared_bl(data);
+ dev_info(dev, "%s: (erase) status=%02x\n", __func__, status);
+ if (status == CMD_ACK) {
+ status = nanohub_bl_write_memory(data, 0x50000000,
+ fw_entry->size,
+ fw_entry->data);
+ mcu_wakeup_gpio_set_value(data, 1);
+ dev_info(dev, "%s: (write) status=%02x\n", __func__, status);
+ if (status == CMD_ACK) {
+ status = nanohub_bl_update_finished(data);
+ dev_info(dev, "%s: (finish) status=%02x\n", __func__, status);
+ }
+ } else {
+ mcu_wakeup_gpio_set_value(data, 1);
+ }
+
+ __nanohub_hw_reset(data, 0);
+ nanohub_wakeup_unlock(data);
+
+ release_firmware(fw_entry);
+ }
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_app(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ const struct firmware *fw_entry;
+ char buffer[70];
+ int i, ret, ret1, ret2, file_len = 0, appid_len = 0, ver_len = 0;
+ const char *appid = NULL, *ver = NULL;
+ unsigned long version;
+ uint64_t id;
+ uint32_t cur_version;
+ bool update = true;
+
+ for (i = 0; i < count; i++) {
+ if (buf[i] == ' ') {
+ if (i + 1 == count) {
+ break;
+ } else {
+ if (appid == NULL)
+ appid = buf + i + 1;
+ else if (ver == NULL)
+ ver = buf + i + 1;
+ else
+ break;
+ }
+ } else if (buf[i] == '\n' || buf[i] == '\r') {
+ break;
+ } else {
+ if (ver)
+ ver_len++;
+ else if (appid)
+ appid_len++;
+ else
+ file_len++;
+ }
+ }
+
+ if (file_len > 64 || appid_len > 16 || ver_len > 8 || file_len < 1)
+ return -EIO;
+
+ memcpy(buffer, buf, file_len);
+ memcpy(buffer + file_len, ".napp", 5);
+ buffer[file_len + 5] = '\0';
+
+ ret = request_firmware(&fw_entry, buffer, dev);
+ if (ret) {
+ dev_err(dev, "nanohub_download_app(%s): err=%d\n",
+ buffer, ret);
+ return -EIO;
+ }
+ if (appid_len > 0 && ver_len > 0) {
+ memcpy(buffer, appid, appid_len);
+ buffer[appid_len] = '\0';
+
+ ret1 = kstrtoull(buffer, 16, &id);
+
+ memcpy(buffer, ver, ver_len);
+ buffer[ver_len] = '\0';
+
+ ret2 = kstrtoul(buffer, 16, &version);
+
+ if (ret1 == 0 && ret2 == 0) {
+ if (request_wakeup(data))
+ return -ERESTARTSYS;
+ if (nanohub_comms_tx_rx_retrans
+ (data, CMD_COMMS_GET_APP_VERSIONS,
+ (uint8_t *)&id, sizeof(id),
+ (uint8_t *)&cur_version,
+ sizeof(cur_version), false, 10,
+ 10) == sizeof(cur_version)) {
+ if (cur_version == version)
+ update = false;
+ }
+ release_wakeup(data);
+ }
+ }
+
+ if (update)
+ ret =
+ nanohub_comms_app_download(data, fw_entry->data,
+ fw_entry->size);
+
+ release_firmware(fw_entry);
+
+ return count;
+}
+
+static ssize_t nanohub_lock_bl(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ int ret;
+ uint8_t status = CMD_ACK;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, 1);
+
+ gpio_set_value(data->pdata->boot0_gpio, 0);
+	/* this command causes the MCU to reboot itself */
+ status = nanohub_bl_lock(data);
+ dev_info(dev, "%s: status=%02x\n", __func__, status);
+ msleep(350);
+
+ nanohub_wakeup_unlock(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_unlock_bl(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nanohub_data *data = dev_get_nanohub_data(dev);
+ int ret;
+ uint8_t status = CMD_ACK;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+ if (ret < 0)
+ return ret;
+
+ __nanohub_hw_reset(data, 1);
+
+ gpio_set_value(data->pdata->boot0_gpio, 0);
+	/* this command causes the MCU to reboot itself (erasing the flash) */
+ status = nanohub_bl_unlock(data);
+ dev_info(dev, "%s: status=%02x\n", __func__, status);
+ msleep(20);
+
+ nanohub_wakeup_unlock(data);
+
+ return ret < 0 ? ret : count;
+}
+
+static struct device_attribute attributes[] = {
+ __ATTR(wakeup, 0440, nanohub_wakeup_query, NULL),
+ __ATTR(app_info, 0440, nanohub_app_info, NULL),
+ __ATTR(firmware_version, 0440, nanohub_firmware_query, NULL),
+ __ATTR(download_bl, 0220, NULL, nanohub_download_bl),
+ __ATTR(download_kernel, 0220, NULL, nanohub_download_kernel),
+ __ATTR(download_kernel_bl, 0220, NULL, nanohub_download_kernel_bl),
+ __ATTR(download_app, 0220, NULL, nanohub_download_app),
+ __ATTR(erase_shared, 0220, NULL, nanohub_erase_shared),
+ __ATTR(erase_shared_bl, 0220, NULL, nanohub_erase_shared_bl),
+ __ATTR(reset, 0220, NULL, nanohub_try_hw_reset),
+ __ATTR(lock, 0220, NULL, nanohub_lock_bl),
+ __ATTR(unlock, 0220, NULL, nanohub_unlock_bl),
+};
+
+static inline int nanohub_create_sensor(struct nanohub_data *data)
+{
+ int i, ret;
+ struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+
+ for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
+ ret = device_create_file(sensor_dev, &attributes[i]);
+ if (ret) {
+ dev_err(sensor_dev,
+ "create sysfs attr %d [%s] failed; err=%d\n",
+ i, attributes[i].attr.name, ret);
+ goto fail_attr;
+ }
+ }
+
+ ret = sysfs_create_link(&sensor_dev->kobj,
+ &data->iio_dev->dev.kobj, "iio");
+ if (ret) {
+ dev_err(sensor_dev,
+ "sysfs_create_link failed; err=%d\n", ret);
+ goto fail_attr;
+ }
+ goto done;
+
+fail_attr:
+ for (i--; i >= 0; i--)
+ device_remove_file(sensor_dev, &attributes[i]);
+done:
+ return ret;
+}
+
+static int nanohub_create_devices(struct nanohub_data *data)
+{
+ int i, ret;
+ static const char *names[ID_NANOHUB_MAX] = {
+ "nanohub", "nanohub_comms"
+ };
+
+ for (i = 0; i < ID_NANOHUB_MAX; ++i) {
+ struct nanohub_io *io = &data->io[i];
+
+ nanohub_io_init(io, data, device_create(sensor_class, NULL,
+ MKDEV(major, i),
+ io, names[i]));
+ if (IS_ERR(io->dev)) {
+ ret = PTR_ERR(io->dev);
+ pr_err("nanohub: device_create failed for %s; err=%d\n",
+ names[i], ret);
+ goto fail_dev;
+ }
+ }
+
+ ret = nanohub_create_sensor(data);
+ if (!ret)
+ goto done;
+
+fail_dev:
+ for (--i; i >= 0; --i)
+ device_destroy(sensor_class, MKDEV(major, i));
+done:
+ return ret;
+}
+
+static int nanohub_match_devt(struct device *dev, const void *data)
+{
+ const dev_t *devt = data;
+
+ return dev->devt == *devt;
+}
+
+static int nanohub_open(struct inode *inode, struct file *file)
+{
+ dev_t devt = inode->i_rdev;
+ struct device *dev;
+
+ dev = class_find_device(sensor_class, NULL, &devt, nanohub_match_devt);
+ if (dev) {
+ file->private_data = dev_get_drvdata(dev);
+ nonseekable_open(inode, file);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static ssize_t nanohub_read(struct file *file, char *buffer, size_t length,
+ loff_t *offset)
+{
+ struct nanohub_io *io = file->private_data;
+ struct nanohub_data *data = io->data;
+ struct nanohub_buf *buf;
+ int ret;
+
+ if (!nanohub_io_has_buf(io) && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ buf = nanohub_io_get_buf(io, true);
+ if (IS_ERR_OR_NULL(buf))
+ return PTR_ERR(buf);
+
+ ret = copy_to_user(buffer, buf->buffer, buf->length);
+ if (ret != 0)
+ ret = -EFAULT;
+ else
+ ret = buf->length;
+
+ nanohub_io_put_buf(&data->free_pool, buf);
+
+ return ret;
+}
+
+static ssize_t nanohub_write(struct file *file, const char *buffer,
+ size_t length, loff_t *offset)
+{
+ struct nanohub_io *io = file->private_data;
+ struct nanohub_data *data = io->data;
+ int ret;
+
+ ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ ret = nanohub_comms_write(data, buffer, length);
+
+ release_wakeup(data);
+
+ return ret;
+}
+
+static unsigned int nanohub_poll(struct file *file, poll_table *wait)
+{
+ struct nanohub_io *io = file->private_data;
+ unsigned int mask = POLLOUT | POLLWRNORM;
+
+ poll_wait(file, &io->buf_wait, wait);
+
+ if (nanohub_io_has_buf(io))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static int nanohub_release(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static void nanohub_destroy_devices(struct nanohub_data *data)
+{
+ int i;
+ struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+
+ sysfs_remove_link(&sensor_dev->kobj, "iio");
+ for (i = 0; i < ARRAY_SIZE(attributes); i++)
+ device_remove_file(sensor_dev, &attributes[i]);
+ for (i = 0; i < ID_NANOHUB_MAX; ++i)
+ device_destroy(sensor_class, MKDEV(major, i));
+}
+
+static irqreturn_t nanohub_irq1(int irq, void *dev_id)
+{
+ struct nanohub_data *data = (struct nanohub_data *)dev_id;
+
+ nanohub_handle_irq1(data);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nanohub_irq2(int irq, void *dev_id)
+{
+ struct nanohub_data *data = (struct nanohub_data *)dev_id;
+
+ nanohub_handle_irq2(data);
+
+ return IRQ_HANDLED;
+}
+
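+/* forward embedded OS log events (level char + message) to the kernel log */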
+static bool nanohub_os_log(char *buffer, int len)
+{
+ if (le32_to_cpu((((uint32_t *)buffer)[0]) & 0x7FFFFFFF) ==
+ OS_LOG_EVENTID) {
+ char *mtype, *mdata = &buffer[5];
+
+ buffer[len] = 0x00;
+
+ switch (buffer[4]) {
+ case 'E':
+ mtype = KERN_ERR;
+ break;
+ case 'W':
+ mtype = KERN_WARNING;
+ break;
+ case 'I':
+ mtype = KERN_INFO;
+ break;
+ case 'D':
+ mtype = KERN_DEBUG;
+ break;
+ default:
+ mtype = KERN_DEFAULT;
+ mdata--;
+ break;
+ }
+ printk("%snanohub: %s", mtype, mdata);
+ return true;
+ } else {
+ return false;
+ }
+}
+
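+/*
+ * Route a received event to either the sensor or the comms device node and
+ * hold a short wake lock when the event needs to reach userspace before
+ * the system suspends again.
+ */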
+static void nanohub_process_buffer(struct nanohub_data *data,
+ struct nanohub_buf **buf,
+ int ret)
+{
+ uint32_t event_id;
+ uint8_t interrupt;
+ bool wakeup = false;
+ struct nanohub_io *io = &data->io[ID_NANOHUB_SENSOR];
+
+ data->kthread_err_cnt = 0;
+ if (ret < 4 || nanohub_os_log((*buf)->buffer, ret)) {
+ release_wakeup(data);
+ return;
+ }
+
+ (*buf)->length = ret;
+
+ event_id = le32_to_cpu((((uint32_t *)(*buf)->buffer)[0]) & 0x7FFFFFFF);
+ if (ret >= sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint32_t) &&
+ event_id > FIRST_SENSOR_EVENTID &&
+ event_id <= LAST_SENSOR_EVENTID) {
+ interrupt = (*buf)->buffer[sizeof(uint32_t) +
+ sizeof(uint64_t) + 3];
+ if (interrupt == WAKEUP_INTERRUPT)
+ wakeup = true;
+ }
+ if (event_id == APP_TO_HOST_EVENTID) {
+ wakeup = true;
+ io = &data->io[ID_NANOHUB_COMMS];
+ }
+
+ nanohub_io_put_buf(io, *buf);
+
+ *buf = NULL;
+	/*
+	 * For wakeup interrupts, hold a wake lock for 250 ms so the sensor
+	 * HAL has time to grab its own wake lock.
+	 */
+ if (wakeup)
+ __pm_wakeup_event(&data->wakesrc_read, 250);
+ release_wakeup(data);
+}
+
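+/*
+ * Main read loop: ST_IDLE blocks until an interrupt or a writer kicks the
+ * thread, ST_RUNNING drains the hub's read queue into free_pool buffers,
+ * and ST_ERROR backs off (hard-resetting the hub after repeated failures).
+ */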
+static int nanohub_kthread(void *arg)
+{
+ struct nanohub_data *data = (struct nanohub_data *)arg;
+ struct nanohub_buf *buf = NULL;
+ int ret;
+ ktime_t ktime_delta;
+ uint32_t clear_interrupts[8] = { 0x00000006 };
+ struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+ static const struct sched_param param = {
+ .sched_priority = (MAX_USER_RT_PRIO/2)-1,
+ };
+
+ data->kthread_err_cnt = 0;
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ nanohub_set_state(data, ST_IDLE);
+
+ while (!kthread_should_stop()) {
+ switch (nanohub_get_state(data)) {
+ case ST_IDLE:
+ wait_event_interruptible(data->kthread_wait,
+ atomic_read(&data->kthread_run)
+ );
+ nanohub_set_state(data, ST_RUNNING);
+ break;
+ case ST_ERROR:
+ ktime_delta = ktime_sub(ktime_get_boottime(),
+ data->kthread_err_ktime);
+ if (ktime_to_ns(ktime_delta) > KTHREAD_ERR_TIME_NS
+ && data->kthread_err_cnt > KTHREAD_ERR_CNT) {
+ dev_info(sensor_dev,
+ "kthread: hard reset due to consistent error\n");
+ ret = nanohub_hw_reset(data);
+ if (ret) {
+ dev_info(sensor_dev,
+ "%s: failed to reset nanohub: ret=%d\n",
+ __func__, ret);
+ }
+ }
+ msleep_interruptible(WAKEUP_TIMEOUT_MS);
+ nanohub_set_state(data, ST_RUNNING);
+ break;
+ case ST_RUNNING:
+ break;
+ }
+ atomic_set(&data->kthread_run, 0);
+ if (!buf)
+ buf = nanohub_io_get_buf(&data->free_pool,
+ false);
+ if (buf) {
+ ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+ if (ret) {
+ dev_info(sensor_dev,
+ "%s: request_wakeup_timeout: ret=%d\n",
+ __func__, ret);
+ continue;
+ }
+
+ ret = nanohub_comms_rx_retrans_boottime(
+ data, CMD_COMMS_READ, buf->buffer,
+ sizeof(buf->buffer), 10, 0);
+
+ if (ret > 0) {
+ nanohub_process_buffer(data, &buf, ret);
+ if (!nanohub_irq1_fired(data) &&
+ !nanohub_irq2_fired(data)) {
+ nanohub_set_state(data, ST_IDLE);
+ continue;
+ }
+ } else if (ret == 0) {
+ /* queue empty, go to sleep */
+ data->kthread_err_cnt = 0;
+ data->interrupts[0] &= ~0x00000006;
+ release_wakeup(data);
+ nanohub_set_state(data, ST_IDLE);
+ continue;
+ } else {
+ release_wakeup(data);
+ if (data->kthread_err_cnt == 0)
+ data->kthread_err_ktime =
+ ktime_get_boottime();
+
+ data->kthread_err_cnt++;
+ if (data->kthread_err_cnt >= KTHREAD_WARN_CNT) {
+ dev_err(sensor_dev,
+ "%s: kthread_err_cnt=%d\n",
+ __func__,
+ data->kthread_err_cnt);
+ nanohub_set_state(data, ST_ERROR);
+ continue;
+ }
+ }
+ } else {
+ if (!nanohub_irq1_fired(data) &&
+ !nanohub_irq2_fired(data)) {
+ nanohub_set_state(data, ST_IDLE);
+ continue;
+ }
+			/*
+			 * Pending interrupt, but no free buffer to read the
+			 * data into: just clear the interrupts on the hub.
+			 */
+ if (request_wakeup(data))
+ continue;
+ nanohub_comms_tx_rx_retrans(data,
+ CMD_COMMS_CLR_GET_INTR,
+ (uint8_t *)
+ clear_interrupts,
+ sizeof(clear_interrupts),
+ (uint8_t *) data->
+ interrupts,
+ sizeof(data->interrupts),
+ false, 10, 0);
+ release_wakeup(data);
+ nanohub_set_state(data, ST_IDLE);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct nanohub_platform_data *nanohub_parse_dt(struct device *dev)
+{
+ struct nanohub_platform_data *pdata;
+ struct device_node *dt = dev->of_node;
+ const uint32_t *tmp;
+ struct property *prop;
+ uint32_t u, i;
+ int ret;
+
+ if (!dt)
+ return ERR_PTR(-ENODEV);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ ret = pdata->irq1_gpio =
+ of_get_named_gpio(dt, "sensorhub,irq1-gpio", 0);
+ if (ret < 0) {
+ pr_err("nanohub: missing sensorhub,irq1-gpio in device tree\n");
+ goto free_pdata;
+ }
+
+ /* optional (strongly recommended) */
+ pdata->irq2_gpio = of_get_named_gpio(dt, "sensorhub,irq2-gpio", 0);
+
+ ret = pdata->wakeup_gpio =
+ of_get_named_gpio(dt, "sensorhub,wakeup-gpio", 0);
+ if (ret < 0) {
+		pr_err("nanohub: missing sensorhub,wakeup-gpio in device tree\n");
+ goto free_pdata;
+ }
+
+ ret = pdata->nreset_gpio =
+ of_get_named_gpio(dt, "sensorhub,nreset-gpio", 0);
+ if (ret < 0) {
+		pr_err("nanohub: missing sensorhub,nreset-gpio in device tree\n");
+ goto free_pdata;
+ }
+
+ /* optional (stm32f bootloader) */
+ pdata->boot0_gpio = of_get_named_gpio(dt, "sensorhub,boot0-gpio", 0);
+
+ /* optional (spi) */
+ pdata->spi_cs_gpio = of_get_named_gpio(dt, "sensorhub,spi-cs-gpio", 0);
+
+ /* optional (stm32f bootloader) */
+ of_property_read_u32(dt, "sensorhub,bl-addr", &pdata->bl_addr);
+
+ /* optional (stm32f bootloader) */
+ tmp = of_get_property(dt, "sensorhub,num-flash-banks", NULL);
+ if (tmp) {
+ pdata->num_flash_banks = be32_to_cpup(tmp);
+ pdata->flash_banks =
+ devm_kzalloc(dev,
+ sizeof(struct nanohub_flash_bank) *
+ pdata->num_flash_banks, GFP_KERNEL);
+ if (!pdata->flash_banks)
+ goto no_mem;
+
+ /* TODO: investigate replacing with of_property_read_u32_array
+ */
+ i = 0;
+ of_property_for_each_u32(dt, "sensorhub,flash-banks", prop, tmp,
+ u) {
+ if (i / 3 >= pdata->num_flash_banks)
+ break;
+ switch (i % 3) {
+ case 0:
+ pdata->flash_banks[i / 3].bank = u;
+ break;
+ case 1:
+ pdata->flash_banks[i / 3].address = u;
+ break;
+ case 2:
+ pdata->flash_banks[i / 3].length = u;
+ break;
+ }
+ i++;
+ }
+ }
+
+ /* optional (stm32f bootloader) */
+ tmp = of_get_property(dt, "sensorhub,num-shared-flash-banks", NULL);
+ if (tmp) {
+ pdata->num_shared_flash_banks = be32_to_cpup(tmp);
+ pdata->shared_flash_banks =
+ devm_kzalloc(dev,
+ sizeof(struct nanohub_flash_bank) *
+ pdata->num_shared_flash_banks, GFP_KERNEL);
+ if (!pdata->shared_flash_banks)
+ goto no_mem_shared;
+
+ /* TODO: investigate replacing with of_property_read_u32_array
+ */
+ i = 0;
+ of_property_for_each_u32(dt, "sensorhub,shared-flash-banks",
+ prop, tmp, u) {
+ if (i / 3 >= pdata->num_shared_flash_banks)
+ break;
+ switch (i % 3) {
+ case 0:
+ pdata->shared_flash_banks[i / 3].bank = u;
+ break;
+ case 1:
+ pdata->shared_flash_banks[i / 3].address = u;
+ break;
+ case 2:
+ pdata->shared_flash_banks[i / 3].length = u;
+ break;
+ }
+ i++;
+ }
+ }
+
+ return pdata;
+
+no_mem_shared:
+ devm_kfree(dev, pdata->flash_banks);
+no_mem:
+ ret = -ENOMEM;
+free_pdata:
+ devm_kfree(dev, pdata);
+ return ERR_PTR(ret);
+}
+#else
+static struct nanohub_platform_data *nanohub_parse_dt(struct device *dev)
+{
+ struct nanohub_platform_data *pdata;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ return pdata;
+}
+#endif
+
+static int nanohub_request_irqs(struct nanohub_data *data)
+{
+ int ret;
+
+ ret = request_threaded_irq(data->irq1, NULL, nanohub_irq1,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "nanohub-irq1", data);
+ if (ret < 0)
+ data->irq1 = 0;
+ else
+ disable_irq(data->irq1);
+ if (data->irq2 <= 0 || ret < 0) {
+ data->irq2 = 0;
+ return ret;
+ }
+
+ ret = request_threaded_irq(data->irq2, NULL, nanohub_irq2,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "nanohub-irq2", data);
+ if (ret < 0) {
+		WARN(1, "failed to request optional IRQ %d; err=%d",
+		     data->irq2, ret);
+		data->irq2 = 0;
+ } else {
+ disable_irq(data->irq2);
+ }
+
+	/*
+	 * If the second request fails, hide the error: IRQ2 is optional and
+	 * its absence must not abort the driver init sequence.
+	 */
+ return 0;
+}
+
+static int nanohub_request_gpios(struct nanohub_data *data)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(gconf); ++i) {
+ const struct gpio_config *cfg = &gconf[i];
+ unsigned int gpio = plat_gpio_get(data, cfg);
+ const char *label;
+ bool optional = gpio_is_optional(cfg);
+
+ ret = 0; /* clear errors on optional pins, if any */
+
+ if (!gpio_is_valid(gpio) && optional)
+ continue;
+
+ label = cfg->label;
+ ret = gpio_request_one(gpio, cfg->flags, label);
+ if (ret && !optional) {
+ pr_err("nanohub: gpio %d[%s] request failed;err=%d\n",
+ gpio, label, ret);
+ break;
+ }
+ if (gpio_has_irq(cfg)) {
+ int irq = gpio_to_irq(gpio);
+ if (irq > 0) {
+ nanohub_set_irq_data(data, cfg, irq);
+ } else if (!optional) {
+ ret = -EINVAL;
+ pr_err("nanohub: no irq; gpio %d[%s];err=%d\n",
+ gpio, label, irq);
+ break;
+ }
+ }
+ }
+ if (i < ARRAY_SIZE(gconf)) {
+ for (--i; i >= 0; --i)
+ gpio_free(plat_gpio_get(data, &gconf[i]));
+ }
+
+ return ret;
+}
+
+static void nanohub_release_gpios_irqs(struct nanohub_data *data)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ if (data->irq2)
+ free_irq(data->irq2, data);
+ if (data->irq1)
+ free_irq(data->irq1, data);
+ if (gpio_is_valid(pdata->irq2_gpio))
+ gpio_free(pdata->irq2_gpio);
+ gpio_free(pdata->irq1_gpio);
+ gpio_set_value(pdata->nreset_gpio, 0);
+ gpio_free(pdata->nreset_gpio);
+ mcu_wakeup_gpio_set_value(data, 1);
+ gpio_free(pdata->wakeup_gpio);
+ gpio_set_value(pdata->boot0_gpio, 0);
+ gpio_free(pdata->boot0_gpio);
+}
+
+struct iio_dev *nanohub_probe(struct device *dev, struct iio_dev *iio_dev)
+{
+ int ret, i;
+ const struct nanohub_platform_data *pdata;
+ struct nanohub_data *data;
+ struct nanohub_buf *buf;
+ bool own_iio_dev = !iio_dev;
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata) {
+ pdata = nanohub_parse_dt(dev);
+ if (IS_ERR(pdata))
+ return ERR_PTR(PTR_ERR(pdata));
+ }
+
+ if (own_iio_dev) {
+ iio_dev = iio_device_alloc(sizeof(struct nanohub_data));
+ if (!iio_dev)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iio_dev->name = "nanohub";
+ iio_dev->dev.parent = dev;
+ iio_dev->info = &nanohub_iio_info;
+ iio_dev->channels = NULL;
+ iio_dev->num_channels = 0;
+
+ data = iio_priv(iio_dev);
+ data->iio_dev = iio_dev;
+ data->pdata = pdata;
+
+ init_waitqueue_head(&data->kthread_wait);
+
+ nanohub_io_init(&data->free_pool, data, dev);
+
+ buf = vmalloc(sizeof(*buf) * READ_QUEUE_DEPTH);
+ data->vbuf = buf;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_vma;
+ }
+
+ for (i = 0; i < READ_QUEUE_DEPTH; i++)
+ nanohub_io_put_buf(&data->free_pool, &buf[i]);
+ atomic_set(&data->kthread_run, 0);
+ wakeup_source_init(&data->wakesrc_read, "nanohub_wakelock_read");
+
+ atomic_set(&data->lock_mode, LOCK_MODE_NONE);
+ atomic_set(&data->wakeup_cnt, 0);
+ atomic_set(&data->wakeup_lock_cnt, 0);
+ atomic_set(&data->wakeup_acquired, 0);
+ init_waitqueue_head(&data->wakeup_wait);
+
+ ret = nanohub_request_gpios(data);
+ if (ret)
+ goto fail_gpio;
+
+ ret = nanohub_request_irqs(data);
+ if (ret)
+ goto fail_irq;
+
+ ret = iio_device_register(iio_dev);
+ if (ret) {
+ pr_err("nanohub: iio_device_register failed\n");
+ goto fail_irq;
+ }
+
+ ret = nanohub_create_devices(data);
+ if (ret)
+ goto fail_dev;
+
+ data->thread = kthread_run(nanohub_kthread, data, "nanohub");
+
+ udelay(30);
+
+ return iio_dev;
+
+fail_dev:
+ iio_device_unregister(iio_dev);
+fail_irq:
+ nanohub_release_gpios_irqs(data);
+fail_gpio:
+ wakeup_source_trash(&data->wakesrc_read);
+ vfree(buf);
+fail_vma:
+ if (own_iio_dev)
+ iio_device_free(iio_dev);
+
+ return ERR_PTR(ret);
+}
+
+int nanohub_reset(struct nanohub_data *data)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ gpio_set_value(pdata->nreset_gpio, 1);
+ usleep_range(650000, 700000);
+ enable_irq(data->irq1);
+ if (data->irq2)
+ enable_irq(data->irq2);
+ else
+ nanohub_unmask_interrupt(data, 2);
+
+ return 0;
+}
+
+int nanohub_remove(struct iio_dev *iio_dev)
+{
+ struct nanohub_data *data = iio_priv(iio_dev);
+
+ nanohub_notify_thread(data);
+ kthread_stop(data->thread);
+
+ nanohub_destroy_devices(data);
+ iio_device_unregister(iio_dev);
+ nanohub_release_gpios_irqs(data);
+ wakeup_source_trash(&data->wakesrc_read);
+ vfree(data->vbuf);
+ iio_device_free(iio_dev);
+
+ return 0;
+}
+
+int nanohub_suspend(struct iio_dev *iio_dev)
+{
+ struct nanohub_data *data = iio_priv(iio_dev);
+ int ret;
+
+ ret = nanohub_wakeup_lock(data, LOCK_MODE_SUSPEND_RESUME);
+ if (!ret) {
+ int cnt;
+ const int max_cnt = 10;
+
+ for (cnt = 0; cnt < max_cnt; ++cnt) {
+ if (!nanohub_irq1_fired(data))
+ break;
+ usleep_range(10, 15);
+ }
+ if (cnt < max_cnt) {
+ dev_dbg(&iio_dev->dev, "%s: cnt=%d\n", __func__, cnt);
+ enable_irq_wake(data->irq1);
+ return 0;
+ }
+ ret = -EBUSY;
+ dev_info(&iio_dev->dev,
+ "%s: failed to suspend: IRQ1=%d, state=%d\n",
+ __func__, nanohub_irq1_fired(data),
+ nanohub_get_state(data));
+ nanohub_wakeup_unlock(data);
+ } else {
+ dev_info(&iio_dev->dev, "%s: could not take wakeup lock\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+int nanohub_resume(struct iio_dev *iio_dev)
+{
+ struct nanohub_data *data = iio_priv(iio_dev);
+
+ disable_irq_wake(data->irq1);
+ nanohub_wakeup_unlock(data);
+
+ return 0;
+}
+
+static int __init nanohub_init(void)
+{
+ int ret = 0;
+
+ sensor_class = class_create(THIS_MODULE, "nanohub");
+ if (IS_ERR(sensor_class)) {
+ ret = PTR_ERR(sensor_class);
+ pr_err("nanohub: class_create failed; err=%d\n", ret);
+ }
+ if (!ret)
+ major = __register_chrdev(0, 0, ID_NANOHUB_MAX, "nanohub",
+ &nanohub_fileops);
+
+ if (major < 0) {
+ ret = major;
+ major = 0;
+ pr_err("nanohub: can't register; err=%d\n", ret);
+ }
+
+#ifdef CONFIG_NANOHUB_SPI
+ if (ret == 0)
+ ret = nanohub_spi_init();
+#endif
+ pr_info("nanohub: loaded; ret=%d\n", ret);
+ return ret;
+}
+
+static void __exit nanohub_cleanup(void)
+{
+#ifdef CONFIG_NANOHUB_SPI
+ nanohub_spi_cleanup();
+#endif
+ __unregister_chrdev(major, 0, ID_NANOHUB_MAX, "nanohub");
+ class_destroy(sensor_class);
+ major = 0;
+	sensor_class = NULL;
+}
+
+module_init(nanohub_init);
+module_exit(nanohub_cleanup);
+
+MODULE_AUTHOR("Ben Fennema");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nanohub/main.h b/drivers/staging/nanohub/main.h
new file mode 100644
index 000000000000..37fb3148e645
--- /dev/null
+++ b/drivers/staging/nanohub/main.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_MAIN_H
+#define _NANOHUB_MAIN_H
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+
+#include "comms.h"
+#include "bl.h"
+
+#define NANOHUB_NAME "nanohub"
+
+struct nanohub_buf {
+ struct list_head list;
+ uint8_t buffer[255];
+ uint8_t length;
+};
+
+struct nanohub_data;
+
+struct nanohub_io {
+ struct device *dev;
+ struct nanohub_data *data;
+ wait_queue_head_t buf_wait;
+ struct list_head buf_list;
+};
+
+static inline struct nanohub_data *dev_get_nanohub_data(struct device *dev)
+{
+ struct nanohub_io *io = dev_get_drvdata(dev);
+
+ return io->data;
+}
+
+struct nanohub_data {
+ /* indices for io[] array */
+ #define ID_NANOHUB_SENSOR 0
+ #define ID_NANOHUB_COMMS 1
+ #define ID_NANOHUB_MAX 2
+
+ struct iio_dev *iio_dev;
+ struct nanohub_io io[ID_NANOHUB_MAX];
+
+ struct nanohub_comms comms;
+ struct nanohub_bl bl;
+ const struct nanohub_platform_data *pdata;
+ int irq1;
+ int irq2;
+
+ atomic_t kthread_run;
+ atomic_t thread_state;
+ wait_queue_head_t kthread_wait;
+
+ struct wakeup_source wakesrc_read;
+
+ struct nanohub_io free_pool;
+
+ atomic_t lock_mode;
+ /* these 3 vars should be accessed only with wakeup_wait.lock held */
+ atomic_t wakeup_cnt;
+ atomic_t wakeup_lock_cnt;
+ atomic_t wakeup_acquired;
+ wait_queue_head_t wakeup_wait;
+
+ uint32_t interrupts[8];
+
+ ktime_t wakeup_err_ktime;
+ int wakeup_err_cnt;
+
+ ktime_t kthread_err_ktime;
+ int kthread_err_cnt;
+
+ void *vbuf;
+ struct task_struct *thread;
+};
+
+enum {
+ KEY_WAKEUP_NONE,
+ KEY_WAKEUP,
+ KEY_WAKEUP_LOCK,
+};
+
+enum {
+ LOCK_MODE_NONE,
+ LOCK_MODE_NORMAL,
+ LOCK_MODE_IO,
+ LOCK_MODE_IO_BL,
+ LOCK_MODE_RESET,
+ LOCK_MODE_SUSPEND_RESUME,
+};
+
+int request_wakeup_ex(struct nanohub_data *data, long timeout,
+ int key, int lock_mode);
+void release_wakeup_ex(struct nanohub_data *data, int key, int lock_mode);
+int nanohub_wait_for_interrupt(struct nanohub_data *data);
+int nanohub_wakeup_eom(struct nanohub_data *data, bool repeat);
+struct iio_dev *nanohub_probe(struct device *dev, struct iio_dev *iio_dev);
+int nanohub_reset(struct nanohub_data *data);
+int nanohub_remove(struct iio_dev *iio_dev);
+int nanohub_suspend(struct iio_dev *iio_dev);
+int nanohub_resume(struct iio_dev *iio_dev);
+
+static inline int nanohub_irq1_fired(struct nanohub_data *data)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ return !gpio_get_value(pdata->irq1_gpio);
+}
+
+static inline int nanohub_irq2_fired(struct nanohub_data *data)
+{
+ const struct nanohub_platform_data *pdata = data->pdata;
+
+ return data->irq2 && !gpio_get_value(pdata->irq2_gpio);
+}
+
+static inline int request_wakeup_timeout(struct nanohub_data *data, int timeout)
+{
+ return request_wakeup_ex(data, timeout, KEY_WAKEUP, LOCK_MODE_NORMAL);
+}
+
+static inline int request_wakeup(struct nanohub_data *data)
+{
+ return request_wakeup_ex(data, MAX_SCHEDULE_TIMEOUT, KEY_WAKEUP,
+ LOCK_MODE_NORMAL);
+}
+
+static inline void release_wakeup(struct nanohub_data *data)
+{
+ release_wakeup_ex(data, KEY_WAKEUP, LOCK_MODE_NORMAL);
+}
+
+#endif
diff --git a/drivers/staging/nanohub/spi.c b/drivers/staging/nanohub/spi.c
new file mode 100644
index 000000000000..a315327d8b74
--- /dev/null
+++ b/drivers/staging/nanohub/spi.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include "main.h"
+#include "bl.h"
+#include "comms.h"
+
+#define SPI_TIMEOUT 65535
+#define SPI_MIN_DMA 48
+
+struct nanohub_spi_data {
+ struct nanohub_data data;
+ struct spi_device *device;
+ struct semaphore spi_sem;
+ int cs;
+ uint16_t rx_length;
+ uint16_t rx_offset;
+};
+
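+/* a single byte is protected by its complement, longer frames by XOR */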
+static uint8_t bl_checksum(const uint8_t *bytes, int length)
+{
+ int i;
+ uint8_t csum;
+
+ if (length == 1) {
+ csum = ~bytes[0];
+ } else if (length > 1) {
+ for (csum = 0, i = 0; i < length; i++)
+ csum ^= bytes[i];
+ } else {
+ csum = 0xFF;
+ }
+
+ return csum;
+}
+
+static uint8_t spi_bl_write_data(const void *data, uint8_t *tx, int length)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_bl *bl = &spi_data->data.bl;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = length + 1,
+ .tx_buf = bl->tx_buffer,
+ .rx_buf = bl->rx_buffer,
+ .cs_change = 1,
+ };
+
+ tx[length] = bl_checksum(tx, length);
+ memcpy(bl->tx_buffer, tx, length + 1);
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ if (spi_sync_locked(spi_data->device, &msg) == 0)
+ return bl->rx_buffer[length];
+ else
+ return CMD_NACK;
+}
+
+static uint8_t spi_bl_write_cmd(const void *data, uint8_t cmd)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_bl *bl = &spi_data->data.bl;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 3,
+ .tx_buf = bl->tx_buffer,
+ .rx_buf = bl->rx_buffer,
+ .cs_change = 1,
+ };
+ bl->tx_buffer[0] = CMD_SOF;
+ bl->tx_buffer[1] = cmd;
+ bl->tx_buffer[2] = ~cmd;
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ if (spi_sync_locked(spi_data->device, &msg) == 0)
+ return CMD_ACK;
+ else
+ return CMD_NACK;
+}
+
+static uint8_t spi_bl_read_data(const void *data, uint8_t *rx, int length)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_bl *bl = &spi_data->data.bl;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = length + 1,
+ .tx_buf = bl->tx_buffer,
+ .rx_buf = bl->rx_buffer,
+ .cs_change = 1,
+ };
+ memset(&bl->tx_buffer[0], 0x00, length + 1);
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ if (spi_sync_locked(spi_data->device, &msg) == 0) {
+ memcpy(rx, &bl->rx_buffer[1], length);
+ return CMD_ACK;
+ } else {
+ return CMD_NACK;
+ }
+}
+
+static uint8_t spi_bl_read_ack(const void *data)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_bl *bl = &spi_data->data.bl;
+ int32_t timeout = SPI_TIMEOUT;
+ uint8_t ret;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 1,
+ .tx_buf = bl->tx_buffer,
+ .rx_buf = bl->rx_buffer,
+ .cs_change = 1,
+ };
+ bl->tx_buffer[0] = 0x00;
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ if (spi_sync_locked(spi_data->device, &msg) == 0) {
+ do {
+ spi_sync_locked(spi_data->device, &msg);
+ timeout--;
+ if (bl->rx_buffer[0] != CMD_ACK
+ && bl->rx_buffer[0] != CMD_NACK
+ && timeout % 256 == 0)
+ schedule();
+ } while (bl->rx_buffer[0] != CMD_ACK
+ && bl->rx_buffer[0] != CMD_NACK && timeout > 0);
+
+ if (bl->rx_buffer[0] != CMD_ACK && bl->rx_buffer[0] != CMD_NACK
+ && timeout == 0)
+ ret = CMD_NACK;
+ else
+ ret = bl->rx_buffer[0];
+
+ bl->tx_buffer[0] = CMD_ACK;
+ spi_sync_locked(spi_data->device, &msg);
+ return ret;
+ } else {
+ return CMD_NACK;
+ }
+}
+
+static int spi_bl_open(const void *data)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ int ret;
+
+ spi_bus_lock(spi_data->device->master);
+ spi_data->device->max_speed_hz = 1000000;
+ spi_data->device->mode = SPI_MODE_0;
+ spi_data->device->bits_per_word = 8;
+ ret = spi_setup(spi_data->device);
+ if (!ret)
+ gpio_set_value(spi_data->cs, 0);
+
+ return ret;
+}
+
+static void spi_bl_close(const void *data)
+{
+ const struct nanohub_spi_data *spi_data = data;
+
+ gpio_set_value(spi_data->cs, 1);
+ spi_bus_unlock(spi_data->device->master);
+}
+
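+/*
+ * Bootloader sync: clock out CMD_SOF until the MCU answers CMD_SOF_ACK,
+ * then collect the regular ACK/NACK that completes the handshake.
+ */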
+static uint8_t spi_bl_sync(const void *data)
+{
+ const struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_bl *bl = &spi_data->data.bl;
+ int32_t timeout = SPI_TIMEOUT;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 1,
+ .tx_buf = bl->tx_buffer,
+ .rx_buf = bl->rx_buffer,
+ .cs_change = 1,
+ };
+ bl->tx_buffer[0] = CMD_SOF;
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ do {
+ if (spi_sync_locked(spi_data->device, &msg) != 0)
+ return CMD_NACK;
+ timeout--;
+ if (bl->rx_buffer[0] != CMD_SOF_ACK && timeout % 256 == 0)
+ schedule();
+ } while (bl->rx_buffer[0] != CMD_SOF_ACK && timeout > 0);
+
+ if (bl->rx_buffer[0] == CMD_SOF_ACK)
+ return bl->read_ack(data);
+ else
+ return CMD_NACK;
+}
+
+void nanohub_spi_bl_init(struct nanohub_spi_data *spi_data)
+{
+ struct nanohub_bl *bl = &spi_data->data.bl;
+
+ bl->open = spi_bl_open;
+ bl->sync = spi_bl_sync;
+ bl->write_data = spi_bl_write_data;
+ bl->write_cmd = spi_bl_write_cmd;
+ bl->read_data = spi_bl_read_data;
+ bl->read_ack = spi_bl_read_ack;
+ bl->close = spi_bl_close;
+}
+
+int nanohub_spi_write(void *data, uint8_t *tx, int length, int timeout)
+{
+ struct nanohub_spi_data *spi_data = data;
+ const struct nanohub_comms *comms = &spi_data->data.comms;
+ int max_len = sizeof(struct nanohub_packet) + MAX_UINT8 +
+ sizeof(struct nanohub_packet_crc);
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = max_len + timeout,
+ .tx_buf = comms->tx_buffer,
+ .rx_buf = comms->rx_buffer,
+ .cs_change = 1,
+ };
+ spi_data->rx_offset = max_len;
+ spi_data->rx_length = max_len + timeout;
+ memcpy(comms->tx_buffer, tx, length);
+ memset(comms->tx_buffer + length, 0xFF, max_len + timeout - length);
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ if (spi_sync_locked(spi_data->device, &msg) == 0)
+ return length;
+ else
+ return ERROR_NACK;
+}
+
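+/*
+ * Reads clock out 0xFF filler and scan for the first non-0xFF byte, which
+ * starts a nanohub_packet; bytes left over from a previous transfer are
+ * tracked via rx_offset/rx_length and consumed before touching the bus.
+ */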
+int nanohub_spi_read(void *data, uint8_t *rx, int max_length, int timeout)
+{
+ struct nanohub_spi_data *spi_data = data;
+ struct nanohub_comms *comms = &spi_data->data.comms;
+ const int min_size = sizeof(struct nanohub_packet) +
+ sizeof(struct nanohub_packet_crc);
+ int i, ret;
+ int offset = 0;
+ struct nanohub_packet *packet = NULL;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = timeout,
+ .tx_buf = comms->tx_buffer,
+ .rx_buf = comms->rx_buffer,
+ .cs_change = 1,
+ };
+
+ if (max_length < min_size)
+ return ERROR_NACK;
+
+ /* consume leftover bytes, if any */
+ if (spi_data->rx_offset < spi_data->rx_length) {
+ for (i = spi_data->rx_offset; i < spi_data->rx_length; i++) {
+ if (comms->rx_buffer[i] != 0xFF) {
+ offset = spi_data->rx_length - i;
+
+ if (offset <
+ offsetof(struct nanohub_packet,
+ len) + sizeof(packet->len)) {
+ memcpy(rx, &comms->rx_buffer[i],
+ offset);
+ xfer.len =
+ min_size + MAX_UINT8 - offset;
+ break;
+ } else {
+ packet =
+ (struct nanohub_packet *)&comms->
+ rx_buffer[i];
+ if (offset < min_size + packet->len) {
+ memcpy(rx, packet, offset);
+ xfer.len =
+ min_size + packet->len -
+ offset;
+ break;
+ } else {
+ memcpy(rx, packet,
+ min_size + packet->len);
+ spi_data->rx_offset = i +
+ min_size + packet->len;
+ return min_size + packet->len;
+ }
+ }
+ }
+ }
+ }
+
+ if (xfer.len != 1 && xfer.len < SPI_MIN_DMA)
+ xfer.len = SPI_MIN_DMA;
+ memset(comms->tx_buffer, 0xFF, xfer.len);
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ ret = spi_sync_locked(spi_data->device, &msg);
+ if (ret == 0) {
+ if (offset > 0) {
+ packet = (struct nanohub_packet *)rx;
+ if (offset + xfer.len > max_length)
+ memcpy(&rx[offset], comms->rx_buffer,
+ max_length - offset);
+ else
+ memcpy(&rx[offset], comms->rx_buffer, xfer.len);
+ spi_data->rx_length = xfer.len;
+ spi_data->rx_offset = min_size + packet->len - offset;
+ } else {
+ for (i = 0; i < xfer.len; i++) {
+ if (comms->rx_buffer[i] != 0xFF) {
+ spi_data->rx_length = xfer.len;
+
+ if (xfer.len - i < min_size) {
+ spi_data->rx_offset = i;
+ break;
+ } else {
+ packet =
+ (struct nanohub_packet *)
+ &comms->rx_buffer[i];
+ if (xfer.len - i <
+ min_size + packet->len) {
+ packet = NULL;
+ spi_data->rx_offset = i;
+ } else {
+ memcpy(rx, packet,
+ min_size +
+ packet->len);
+ spi_data->rx_offset =
+ i + min_size +
+ packet->len;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+ else if (!packet)
+ return 0;
+ else
+ return min_size + packet->len;
+}
+
+static int nanohub_spi_open(void *data)
+{
+ struct nanohub_spi_data *spi_data = data;
+ int ret;
+
+ down(&spi_data->spi_sem);
+ spi_bus_lock(spi_data->device->master);
+ spi_data->device->max_speed_hz = 10000000;
+ spi_data->device->mode = SPI_MODE_0;
+ spi_data->device->bits_per_word = 8;
+ ret = spi_setup(spi_data->device);
+ if (!ret) {
+ udelay(40);
+ gpio_set_value(spi_data->cs, 0);
+ udelay(30);
+ }
+ return ret;
+}
+
+static void nanohub_spi_close(void *data)
+{
+ struct nanohub_spi_data *spi_data = data;
+
+ gpio_set_value(spi_data->cs, 1);
+ spi_bus_unlock(spi_data->device->master);
+ up(&spi_data->spi_sem);
+ udelay(60);
+}
+
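+/*
+ * The comms "timeouts" double as the number of filler bytes clocked on the
+ * bus, so the DMA buffers are sized for the worst case of write, ack and
+ * reply.
+ */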
+void nanohub_spi_comms_init(struct nanohub_spi_data *spi_data)
+{
+ struct nanohub_comms *comms = &spi_data->data.comms;
+ int max_len = sizeof(struct nanohub_packet) + MAX_UINT8 +
+ sizeof(struct nanohub_packet_crc);
+
+ comms->seq = 1;
+ comms->timeout_write = 544;
+ comms->timeout_ack = 272;
+ comms->timeout_reply = 512;
+ comms->open = nanohub_spi_open;
+ comms->close = nanohub_spi_close;
+ comms->write = nanohub_spi_write;
+ comms->read = nanohub_spi_read;
+
+ max_len += comms->timeout_write;
+ max_len = max(max_len, comms->timeout_ack);
+ max_len = max(max_len, comms->timeout_reply);
+ comms->tx_buffer = kmalloc(max_len, GFP_KERNEL | GFP_DMA);
+ comms->rx_buffer = kmalloc(max_len, GFP_KERNEL | GFP_DMA);
+
+ spi_data->rx_length = 0;
+ spi_data->rx_offset = 0;
+
+ sema_init(&spi_data->spi_sem, 1);
+}
+
+static int nanohub_spi_probe(struct spi_device *spi)
+{
+ struct nanohub_spi_data *spi_data;
+ struct iio_dev *iio_dev;
+ int error;
+
+ iio_dev = iio_device_alloc(sizeof(struct nanohub_spi_data));
+
+ iio_dev = nanohub_probe(&spi->dev, iio_dev);
+
+ if (IS_ERR(iio_dev))
+ return PTR_ERR(iio_dev);
+
+ spi_data = iio_priv(iio_dev);
+
+ spi_set_drvdata(spi, iio_dev);
+
+ if (gpio_is_valid(spi_data->data.pdata->spi_cs_gpio)) {
+ error =
+ gpio_request(spi_data->data.pdata->spi_cs_gpio,
+ "nanohub_spi_cs");
+ if (error) {
+ pr_err("nanohub: spi_cs_gpio request failed\n");
+ } else {
+ spi_data->cs = spi_data->data.pdata->spi_cs_gpio;
+ gpio_direction_output(spi_data->cs, 1);
+ }
+ } else {
+ pr_err("nanohub: spi_cs_gpio is not valid\n");
+ }
+
+ spi_data->device = spi;
+ nanohub_spi_comms_init(spi_data);
+
+ spi_data->data.bl.cmd_erase = CMD_ERASE;
+ spi_data->data.bl.cmd_read_memory = CMD_READ_MEMORY;
+ spi_data->data.bl.cmd_write_memory = CMD_WRITE_MEMORY;
+ spi_data->data.bl.cmd_get_version = CMD_GET_VERSION;
+ spi_data->data.bl.cmd_get_id = CMD_GET_ID;
+ spi_data->data.bl.cmd_readout_protect = CMD_READOUT_PROTECT;
+ spi_data->data.bl.cmd_readout_unprotect = CMD_READOUT_UNPROTECT;
+ spi_data->data.bl.cmd_update_finished = CMD_UPDATE_FINISHED;
+ nanohub_spi_bl_init(spi_data);
+
+ nanohub_reset(&spi_data->data);
+
+ return 0;
+}
+
+static int nanohub_spi_remove(struct spi_device *spi)
+{
+ struct nanohub_spi_data *spi_data;
+ struct iio_dev *iio_dev;
+
+ iio_dev = spi_get_drvdata(spi);
+ spi_data = iio_priv(iio_dev);
+
+ if (gpio_is_valid(spi_data->cs)) {
+ gpio_direction_output(spi_data->cs, 1);
+ gpio_free(spi_data->cs);
+ }
+
+ return nanohub_remove(iio_dev);
+}
+
+static int nanohub_spi_suspend(struct device *dev)
+{
+ struct iio_dev *iio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct nanohub_spi_data *spi_data = iio_priv(iio_dev);
+ int ret;
+
+ ret = nanohub_suspend(iio_dev);
+
+ if (!ret) {
+ ret = down_interruptible(&spi_data->spi_sem);
+ if (ret)
+ up(&spi_data->spi_sem);
+ }
+
+ return ret;
+}
+
+static int nanohub_spi_resume(struct device *dev)
+{
+ struct iio_dev *iio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct nanohub_spi_data *spi_data = iio_priv(iio_dev);
+
+ up(&spi_data->spi_sem);
+
+ return nanohub_resume(iio_dev);
+}
+
+static struct spi_device_id nanohub_spi_id[] = {
+ {NANOHUB_NAME, 0},
+ {},
+};
+
+static const struct dev_pm_ops nanohub_spi_pm_ops = {
+ .suspend = nanohub_spi_suspend,
+ .resume = nanohub_spi_resume,
+};
+
+static struct spi_driver nanohub_spi_driver = {
+ .driver = {
+ .name = NANOHUB_NAME,
+ .owner = THIS_MODULE,
+ .pm = &nanohub_spi_pm_ops,
+ },
+ .probe = nanohub_spi_probe,
+ .remove = nanohub_spi_remove,
+ .id_table = nanohub_spi_id,
+};
+
+int __init nanohub_spi_init(void)
+{
+ return spi_register_driver(&nanohub_spi_driver);
+}
+
+void nanohub_spi_cleanup(void)
+{
+ spi_unregister_driver(&nanohub_spi_driver);
+}
+
+MODULE_DEVICE_TABLE(spi, nanohub_spi_id);
diff --git a/drivers/staging/nanohub/spi.h b/drivers/staging/nanohub/spi.h
new file mode 100644
index 000000000000..0155c812ac30
--- /dev/null
+++ b/drivers/staging/nanohub/spi.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_SPI_H
+#define _NANOHUB_SPI_H
+
+int __init nanohub_spi_init(void);
+void nanohub_spi_cleanup(void);
+
+#endif
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a13541bdc726..121f487f2a34 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -184,6 +184,13 @@ config HISI_THERMAL
thermal framework. cpufreq is used as the cooling device to throttle
CPUs when the passive trip is crossed.
+config HI3660_THERMAL
+ tristate "Hi3660 thermal driver"
+ depends on (ARCH_HISI && OF) || COMPILE_TEST
+ help
+ Enable this to plug the Hi3660 thermal driver into the Linux thermal
+ framework.
+
config IMX_THERMAL
tristate "Temperature sensor driver for Freescale i.MX SoCs"
depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c92eb22a41ff..02a38025b4bd 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
+obj-$(CONFIG_HI3660_THERMAL) += hi3660_thermal.o
diff --git a/drivers/thermal/hi3660_thermal.c b/drivers/thermal/hi3660_thermal.c
new file mode 100644
index 000000000000..68fa9018c172
--- /dev/null
+++ b/drivers/thermal/hi3660_thermal.c
@@ -0,0 +1,198 @@
+/*
+ * linux/drivers/thermal/hi3660_thermal.c
+ *
+ * Copyright (c) 2017 Hisilicon Limited.
+ * Copyright (c) 2017 Linaro Limited.
+ *
+ * Author: Tao Wang <kevin.wangtao@hisilicon.com>
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define HW_MAX_SENSORS 4
+#define HISI_MAX_SENSORS 6
+#define SENSOR_MAX 4
+#define SENSOR_AVG 5
+
+#define ADC_MIN 116
+#define ADC_MAX 922
+
+/* hi3660 Thermal Sensor Dev Structure */
+struct hi3660_thermal_sensor {
+ struct hi3660_thermal_data *thermal;
+ struct thermal_zone_device *tzd;
+
+ uint32_t id;
+};
+
+struct hi3660_thermal_data {
+ struct platform_device *pdev;
+ struct hi3660_thermal_sensor sensors[HISI_MAX_SENSORS];
+ void __iomem *thermal_base;
+};
+
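+/* per-sensor temperature code registers, as offsets from thermal_base */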
+static const unsigned int sensor_reg_offset[HW_MAX_SENSORS] = {
+ 0x1c, 0x5c, 0x9c, 0xdc
+};
+
+static int hi3660_thermal_get_temp(void *_sensor, int *temp)
+{
+ struct hi3660_thermal_sensor *sensor = _sensor;
+ struct hi3660_thermal_data *data = sensor->thermal;
+ unsigned int idx;
+ int val, average = 0, max = 0;
+
+ if (sensor->id < HW_MAX_SENSORS) {
+ val = readl(data->thermal_base + sensor_reg_offset[sensor->id]);
+ val = clamp_val(val, ADC_MIN, ADC_MAX);
+ } else {
+ for (idx = 0; idx < HW_MAX_SENSORS; idx++) {
+ val = readl(data->thermal_base
+ + sensor_reg_offset[idx]);
+ val = clamp_val(val, ADC_MIN, ADC_MAX);
+ average += val;
+ if (val > max)
+ max = val;
+ }
+
+ if (sensor->id == SENSOR_MAX)
+ val = max;
+ else if (sensor->id == SENSOR_AVG)
+ val = average / HW_MAX_SENSORS;
+ }
+
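+ /* map the clamped ADC code linearly onto -40000..125000 millicelsius */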
+ *temp = ((val - ADC_MIN) * 165000) / (ADC_MAX - ADC_MIN) - 40000;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops hi3660_of_thermal_ops = {
+ .get_temp = hi3660_thermal_get_temp,
+};
+
+static int hi3660_thermal_register_sensor(struct platform_device *pdev,
+ struct hi3660_thermal_data *data,
+ struct hi3660_thermal_sensor *sensor,
+ int index)
+{
+ int ret = 0;
+
+ sensor->id = index;
+ sensor->thermal = data;
+
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->id, sensor, &hi3660_of_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ ret = PTR_ERR(sensor->tzd);
+ sensor->tzd = NULL;
+ }
+
+ return ret;
+}
+
+static void hi3660_thermal_toggle_sensor(struct hi3660_thermal_sensor *sensor,
+ bool on)
+{
+ struct thermal_zone_device *tzd = sensor->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static int hi3660_thermal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_thermal_data *data;
+ struct resource *res;
+ int ret = 0;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->thermal_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->thermal_base)) {
+ dev_err(dev, "failed to map thermal registers\n");
+ return PTR_ERR(data->thermal_base);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ for (i = 0; i < HISI_MAX_SENSORS; ++i) {
+ ret = hi3660_thermal_register_sensor(pdev, data,
+ &data->sensors[i], i);
+ if (ret)
+ dev_err(&pdev->dev,
+ "failed to register thermal sensor%d: %d\n",
+ i, ret);
+ else
+ hi3660_thermal_toggle_sensor(&data->sensors[i], true);
+ }
+
+ dev_info(&pdev->dev, "Thermal Sensor Loaded\n");
+ return 0;
+}
+
+static int hi3660_thermal_exit(struct platform_device *pdev)
+{
+ struct hi3660_thermal_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ struct hi3660_thermal_sensor *sensor = &data->sensors[i];
+
+ if (!sensor->tzd)
+ continue;
+
+ hi3660_thermal_toggle_sensor(sensor, false);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hi3660_thermal_id_table[] = {
+ { .compatible = "hisilicon,hi3660-thermal" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hi3660_thermal_id_table);
+
+static struct platform_driver hi3660_thermal_driver = {
+ .probe = hi3660_thermal_probe,
+ .remove = hi3660_thermal_exit,
+ .driver = {
+ .name = "hi3660_thermal",
+ .of_match_table = hi3660_thermal_id_table,
+ },
+};
+
+module_platform_driver(hi3660_thermal_driver);
+
+MODULE_AUTHOR("Tao Wang <kevin.wangtao@hisilicon.com>");
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("hi3660 thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig
new file mode 100644
index 000000000000..052cd8e91ab0
--- /dev/null
+++ b/drivers/trusty/Kconfig
@@ -0,0 +1,52 @@
+#
+# Trusty
+#
+
+menu "Trusty"
+
+config TRUSTY
+ tristate "Trusty"
+ default n
+ help
+ Core Trusty driver. The other Trusty options below (FIQ, log,
+ virtio and IPC) all depend on it.
+
+config TRUSTY_FIQ
+ tristate
+ depends on TRUSTY
+
+config TRUSTY_FIQ_ARM
+ tristate
+ depends on TRUSTY
+ depends on ARM
+ select FIQ_GLUE
+ select TRUSTY_FIQ
+ default y
+
+config TRUSTY_FIQ_ARM64
+ tristate
+ depends on TRUSTY
+ depends on ARM64
+ select FIQ_GLUE
+ select TRUSTY_FIQ
+ default y
+
+config TRUSTY_LOG
+ tristate
+ depends on TRUSTY
+ default y
+
+config TRUSTY_VIRTIO
+ tristate "Trusty virtio support"
+ depends on TRUSTY
+ select VIRTIO
+ default y
+
+config TRUSTY_VIRTIO_IPC
+ tristate "Trusty Virtio IPC driver"
+ depends on TRUSTY_VIRTIO
+ default y
+ help
+ This module adds support for communication with Trusty services.
+
+ If you choose to build a module, it'll be called trusty-ipc.
+ Say N if unsure.
+
+endmenu
diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile
new file mode 100644
index 000000000000..9ca451e50dee
--- /dev/null
+++ b/drivers/trusty/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for trusty components
+#
+
+obj-$(CONFIG_TRUSTY) += trusty.o
+obj-$(CONFIG_TRUSTY) += trusty-irq.o
+obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o
+obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o
+obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o
+obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o
+obj-$(CONFIG_TRUSTY) += trusty-mem.o
+obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o
+obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o
diff --git a/drivers/trusty/trusty-fiq-arm.c b/drivers/trusty/trusty-fiq-arm.c
new file mode 100644
index 000000000000..8c62a00bbc44
--- /dev/null
+++ b/drivers/trusty/trusty-fiq-arm.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/fiq_glue.h>
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+
+#include "trusty-fiq.h"
+
+#define _STRINGIFY(x) #x
+#define STRINGIFY(x) _STRINGIFY(x)
+
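+/*
+ * FIQ return handler registered with fiq_glue below: preserve r0 in r12,
+ * then issue SMC_FC_FIQ_EXIT to hand control back to the secure side.
+ * __naked keeps the compiler from adding a prologue/epilogue around the
+ * bare SMC sequence.
+ */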
+static void __naked trusty_fiq_return(void)
+{
+ asm volatile(
+ ".arch_extension sec\n"
+ "mov r12, r0\n"
+ "ldr r0, =" STRINGIFY(SMC_FC_FIQ_EXIT) "\n"
+ "smc #0");
+}
+
+int trusty_fiq_arch_probe(struct platform_device *pdev)
+{
+ return fiq_glue_set_return_handler(trusty_fiq_return);
+}
+
+void trusty_fiq_arch_remove(struct platform_device *pdev)
+{
+ fiq_glue_clear_return_handler(trusty_fiq_return);
+}
diff --git a/drivers/trusty/trusty-fiq-arm64-glue.S b/drivers/trusty/trusty-fiq-arm64-glue.S
new file mode 100644
index 000000000000..efbaca56292c
--- /dev/null
+++ b/drivers/trusty/trusty-fiq-arm64-glue.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
+#include <linux/linkage.h>
+#include <linux/trusty/smcall.h>
+
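+/*
+ * Recursive register-pair push/pop: the trailing registers are pushed
+ * first, so the first pair named lands at the lowest address and x0..x29
+ * end up at their pt_regs offsets once the frame below is built.
+ */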
+.macro push reg1,reg2,remregs:vararg
+ .ifnb \remregs
+ push \remregs
+ .endif
+ stp \reg1, \reg2, [sp, #-16]!
+.endm
+
+.macro pop reg1,reg2,remregs:vararg
+ ldp \reg1, \reg2, [sp], #16
+ .ifnb \remregs
+ pop \remregs
+ .endif
+.endm
+
+ENTRY(trusty_fiq_glue_arm64)
+ sub sp, sp, #S_FRAME_SIZE - S_LR
+ push x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \
+ x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \
+ x26, x27, x28, x29
+ ldr x0, =SMC_FC64_GET_FIQ_REGS
+ smc #0
+ stp x0, x1, [sp, #S_PC] /* original pc, cpsr */
+ tst x1, PSR_MODE_MASK
+ csel x2, x2, x3, eq /* sp el0, sp el1 */
+ stp x30, x2, [sp, #S_LR] /* lr, original sp */
+ mov x0, sp
+ mov x1, x3
+ bl trusty_fiq_handler
+ pop x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \
+ x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \
+ x26, x27, x28, x29
+ ldr x30, [sp], #S_FRAME_SIZE - S_LR /* load LR and restore SP */
+ ldr x0, =SMC_FC_FIQ_EXIT
+ smc #0
+ b . /* should not get here */
+
+ENTRY(trusty_fiq_cpu_resume)
+ ldr x0, =SMC_FC_FIQ_RESUME
+ smc #0
+ ret
diff --git a/drivers/trusty/trusty-fiq-arm64.c b/drivers/trusty/trusty-fiq-arm64.c
new file mode 100644
index 000000000000..8b9a40887587
--- /dev/null
+++ b/drivers/trusty/trusty-fiq-arm64.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+
+#include <asm/fiq_glue.h>
+
+#include "trusty-fiq.h"
+
+extern void trusty_fiq_glue_arm64(void);
+
+static struct device *trusty_dev;
+static DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *fiq_handlers;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp)
+{
+ struct fiq_glue_handler *handler;
+
+ for (handler = ACCESS_ONCE(fiq_handlers); handler;
+ handler = ACCESS_ONCE(handler->next)) {
+ /* Barrier paired with smp_wmb in fiq_glue_register_handler */
+ smp_read_barrier_depends();
+ handler->fiq(handler, regs, svc_sp);
+ }
+}
+
+static void smp_nop_call(void *info)
+{
+ /* If this call is reached, the fiq handler is not currently running */
+}
+
+static void fiq_glue_clear_handler(void)
+{
+ int cpu;
+ int ret;
+ void *stack;
+
+ for_each_possible_cpu(cpu) {
+ stack = per_cpu(fiq_stack, cpu);
+ if (!stack)
+ continue;
+
+ ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER,
+ cpu, 0, 0);
+ if (ret) {
+ pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, 0, 0) failed 0x%x, skip free stack\n",
+ __func__, cpu, ret);
+ continue;
+ }
+
+ per_cpu(fiq_stack, cpu) = NULL;
+ smp_call_function_single(cpu, smp_nop_call, NULL, true);
+ free_pages((unsigned long)stack, THREAD_SIZE_ORDER);
+ }
+}
+
+static int fiq_glue_set_handler(void)
+{
+ int ret;
+ int cpu;
+ void *stack;
+ unsigned long irqflags;
+
+ for_each_possible_cpu(cpu) {
+ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack)) {
+ ret = -ENOMEM;
+ goto err_alloc_fiq_stack;
+ }
+ per_cpu(fiq_stack, cpu) = stack;
+ stack += THREAD_START_SP;
+
+ local_irq_save(irqflags);
+ ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER,
+ cpu, (uintptr_t)trusty_fiq_glue_arm64,
+ (uintptr_t)stack);
+ local_irq_restore(irqflags);
+ if (ret) {
+ pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, %p, %p) failed 0x%x\n",
+ __func__, cpu, trusty_fiq_glue_arm64,
+ stack, ret);
+ ret = -EINVAL;
+ goto err_set_fiq_handler;
+ }
+ }
+ return 0;
+
+err_alloc_fiq_stack:
+err_set_fiq_handler:
+ fiq_glue_clear_handler();
+ return ret;
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+ int ret;
+
+ if (!handler || !handler->fiq) {
+ ret = -EINVAL;
+ goto err_bad_arg;
+ }
+
+ mutex_lock(&fiq_glue_lock);
+
+ if (!trusty_dev) {
+ ret = -ENODEV;
+ goto err_no_trusty;
+ }
+
+ handler->next = fiq_handlers;
+ /*
+ * Write barrier paired with smp_read_barrier_depends in
+ * trusty_fiq_handler. Make sure next pointer is updated before
+ * fiq_handlers so trusty_fiq_handler does not see an uninitialized
+ * value and terminate early or crash.
+ */
+ smp_wmb();
+ fiq_handlers = handler;
+
+ smp_call_function(smp_nop_call, NULL, true);
+
+ if (!handler->next) {
+ ret = fiq_glue_set_handler();
+ if (ret)
+ goto err_set_fiq_handler;
+ }
+
+ mutex_unlock(&fiq_glue_lock);
+ return 0;
+
+err_set_fiq_handler:
+ fiq_handlers = handler->next;
+err_no_trusty:
+ mutex_unlock(&fiq_glue_lock);
+err_bad_arg:
+ pr_err("%s: failed, %d\n", __func__, ret);
+ return ret;
+}
+
+int trusty_fiq_arch_probe(struct platform_device *pdev)
+{
+ mutex_lock(&fiq_glue_lock);
+ trusty_dev = pdev->dev.parent;
+ mutex_unlock(&fiq_glue_lock);
+
+ return 0;
+}
+
+void trusty_fiq_arch_remove(struct platform_device *pdev)
+{
+ mutex_lock(&fiq_glue_lock);
+ fiq_glue_clear_handler();
+ trusty_dev = NULL;
+ mutex_unlock(&fiq_glue_lock);
+}
diff --git a/drivers/trusty/trusty-fiq.c b/drivers/trusty/trusty-fiq.c
new file mode 100644
index 000000000000..1a031c67ea72
--- /dev/null
+++ b/drivers/trusty/trusty-fiq.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+
+#include "trusty-fiq.h"
+
+static int trusty_fiq_remove_child(struct device *dev, void *data)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int trusty_fiq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = trusty_fiq_arch_probe(pdev);
+ if (ret)
+ goto err_set_fiq_return;
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
+ goto err_add_children;
+ }
+
+ return 0;
+
+err_add_children:
+ device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child);
+ trusty_fiq_arch_remove(pdev);
+err_set_fiq_return:
+ return ret;
+}
+
+static int trusty_fiq_remove(struct platform_device *pdev)
+{
+ device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child);
+ trusty_fiq_arch_remove(pdev);
+ return 0;
+}
+
+static const struct of_device_id trusty_fiq_of_match[] = {
+ { .compatible = "android,trusty-fiq-v1", },
+ {},
+};
+
+static struct platform_driver trusty_fiq_driver = {
+ .probe = trusty_fiq_probe,
+ .remove = trusty_fiq_remove,
+ .driver = {
+ .name = "trusty-fiq",
+ .owner = THIS_MODULE,
+ .of_match_table = trusty_fiq_of_match,
+ },
+};
+
+static int __init trusty_fiq_driver_init(void)
+{
+ return platform_driver_register(&trusty_fiq_driver);
+}
+
+static void __exit trusty_fiq_driver_exit(void)
+{
+ platform_driver_unregister(&trusty_fiq_driver);
+}
+
+subsys_initcall(trusty_fiq_driver_init);
+module_exit(trusty_fiq_driver_exit);
diff --git a/drivers/trusty/trusty-fiq.h b/drivers/trusty/trusty-fiq.h
new file mode 100644
index 000000000000..d4ae9a9635f3
--- /dev/null
+++ b/drivers/trusty/trusty-fiq.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+int trusty_fiq_arch_probe(struct platform_device *pdev);
+void trusty_fiq_arch_remove(struct platform_device *pdev);
diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c
new file mode 100644
index 000000000000..06e026344e67
--- /dev/null
+++ b/drivers/trusty/trusty-ipc.c
@@ -0,0 +1,1672 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/aio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+#include <linux/trusty/trusty_ipc.h>
+
+#define MAX_DEVICES 4
+
+#define REPLY_TIMEOUT 5000
+#define TXBUF_TIMEOUT 15000
+
+#define MAX_SRV_NAME_LEN 256
+#define MAX_DEV_NAME_LEN 32
+
+#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE
+#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE
+
+#define TIPC_CTRL_ADDR 53
+#define TIPC_ANY_ADDR 0xFFFFFFFF
+
+#define TIPC_MIN_LOCAL_ADDR 1024
+
+#define TIPC_IOC_MAGIC 'r'
+#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *)
+#if defined(CONFIG_COMPAT)
+#define TIPC_IOC_CONNECT_COMPAT _IOW(TIPC_IOC_MAGIC, 0x80, \
+ compat_uptr_t)
+#endif
+
+struct tipc_virtio_dev;
+
+struct tipc_dev_config {
+ u32 msg_buf_max_size;
+ u32 msg_buf_alignment;
+ char dev_name[MAX_DEV_NAME_LEN];
+} __packed;
+
+struct tipc_msg_hdr {
+ u32 src;
+ u32 dst;
+ u32 reserved;
+ u16 len;
+ u16 flags;
+ u8 data[0];
+} __packed;
+
+enum tipc_ctrl_msg_types {
+ TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
+ TIPC_CTRL_MSGTYPE_GO_OFFLINE,
+ TIPC_CTRL_MSGTYPE_CONN_REQ,
+ TIPC_CTRL_MSGTYPE_CONN_RSP,
+ TIPC_CTRL_MSGTYPE_DISC_REQ,
+};
+
+struct tipc_ctrl_msg {
+ u32 type;
+ u32 body_len;
+ u8 body[0];
+} __packed;
+
+struct tipc_conn_req_body {
+ char name[MAX_SRV_NAME_LEN];
+} __packed;
+
+struct tipc_conn_rsp_body {
+ u32 target;
+ u32 status;
+ u32 remote;
+ u32 max_msg_size;
+ u32 max_msg_cnt;
+} __packed;
+
+struct tipc_disc_req_body {
+ u32 target;
+} __packed;
+
+struct tipc_cdev_node {
+ struct cdev cdev;
+ struct device *dev;
+ unsigned int minor;
+};
+
+enum tipc_device_state {
+ VDS_OFFLINE = 0,
+ VDS_ONLINE,
+ VDS_DEAD,
+};
+
+struct tipc_virtio_dev {
+ struct kref refcount;
+ struct mutex lock; /* protects access to this device */
+ struct virtio_device *vdev;
+ struct virtqueue *rxvq;
+ struct virtqueue *txvq;
+ uint msg_buf_cnt;
+ uint msg_buf_max_cnt;
+ size_t msg_buf_max_sz;
+ uint free_msg_buf_cnt;
+ struct list_head free_buf_list;
+ wait_queue_head_t sendq;
+ struct idr addr_idr;
+ enum tipc_device_state state;
+ struct tipc_cdev_node cdev_node;
+ char cdev_name[MAX_DEV_NAME_LEN];
+};
+
+enum tipc_chan_state {
+ TIPC_DISCONNECTED = 0,
+ TIPC_CONNECTING,
+ TIPC_CONNECTED,
+ TIPC_STALE,
+};
+
+struct tipc_chan {
+ struct mutex lock; /* protects channel state */
+ struct kref refcount;
+ enum tipc_chan_state state;
+ struct tipc_virtio_dev *vds;
+ const struct tipc_chan_ops *ops;
+ void *ops_arg;
+ u32 remote;
+ u32 local;
+ u32 max_msg_size;
+ u32 max_msg_cnt;
+ char srv_name[MAX_SRV_NAME_LEN];
+};
+
+static struct class *tipc_class;
+static unsigned int tipc_major;
+
+static struct virtio_device *default_vdev;
+
+static DEFINE_IDR(tipc_devices);
+static DEFINE_MUTEX(tipc_devices_lock);
+
+static int _match_any(int id, void *p, void *data)
+{
+ return id;
+}
+
+static int _match_data(int id, void *p, void *data)
+{
+ return (p == data);
+}
+
+static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp)
+{
+ return alloc_pages_exact(sz, gfp);
+}
+
+static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa)
+{
+ free_pages_exact(va, sz);
+}
+
+static struct tipc_msg_buf *_alloc_msg_buf(size_t sz)
+{
+ struct tipc_msg_buf *mb;
+
+ /* allocate tracking structure */
+ mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+
+ /* allocate buffer that can be shared with secure world */
+ mb->buf_va = _alloc_shareable_mem(sz, &mb->buf_pa, GFP_KERNEL);
+ if (!mb->buf_va)
+ goto err_alloc;
+
+ mb->buf_sz = sz;
+
+ return mb;
+
+err_alloc:
+ kfree(mb);
+ return NULL;
+}
+
+static void _free_msg_buf(struct tipc_msg_buf *mb)
+{
+ _free_shareable_mem(mb->buf_sz, mb->buf_va, mb->buf_pa);
+ kfree(mb);
+}
+
+static void _free_msg_buf_list(struct list_head *list)
+{
+ struct tipc_msg_buf *mb = NULL;
+
+ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+ while (mb) {
+ list_del(&mb->node);
+ _free_msg_buf(mb);
+ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+ }
+}
+
+static inline void mb_reset(struct tipc_msg_buf *mb)
+{
+ mb->wpos = 0;
+ mb->rpos = 0;
+}
+
+static void _free_chan(struct kref *kref)
+{
+ struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount);
+
+ kfree(ch);
+}
+
+static void _free_vds(struct kref *kref)
+{
+ struct tipc_virtio_dev *vds =
+ container_of(kref, struct tipc_virtio_dev, refcount);
+ kfree(vds);
+}
+
+static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds)
+{
+ return _alloc_msg_buf(vds->msg_buf_max_sz);
+}
+
+static void vds_free_msg_buf(struct tipc_virtio_dev *vds,
+ struct tipc_msg_buf *mb)
+{
+ _free_msg_buf(mb);
+}
+
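+/*
+ * Returns true when the free list goes from empty to non-empty, i.e. a
+ * sender blocked in vds_get_txbuf() may need to be woken up.
+ */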
+static bool _put_txbuf_locked(struct tipc_virtio_dev *vds,
+ struct tipc_msg_buf *mb)
+{
+ list_add_tail(&mb->node, &vds->free_buf_list);
+ return vds->free_msg_buf_cnt++ == 0;
+}
+
+static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds)
+{
+ struct tipc_msg_buf *mb;
+
+ if (vds->state != VDS_ONLINE)
+ return ERR_PTR(-ENODEV);
+
+ if (vds->free_msg_buf_cnt) {
+ /* take it out of free list */
+ mb = list_first_entry(&vds->free_buf_list,
+ struct tipc_msg_buf, node);
+ list_del(&mb->node);
+ vds->free_msg_buf_cnt--;
+ } else {
+ if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt)
+ return ERR_PTR(-EAGAIN);
+
+ /* try to allocate it */
+ mb = _alloc_msg_buf(vds->msg_buf_max_sz);
+ if (!mb)
+ return ERR_PTR(-ENOMEM);
+
+ vds->msg_buf_cnt++;
+ }
+ return mb;
+}
+
+static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds)
+{
+ struct tipc_msg_buf *mb;
+
+ mutex_lock(&vds->lock);
+ mb = _get_txbuf_locked(vds);
+ mutex_unlock(&vds->lock);
+
+ return mb;
+}
+
+static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb)
+{
+ if (!vds)
+ return;
+
+ mutex_lock(&vds->lock);
+ _put_txbuf_locked(vds, mb);
+ wake_up_interruptible(&vds->sendq);
+ mutex_unlock(&vds->lock);
+}
+
+static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds,
+ long timeout)
+{
+ struct tipc_msg_buf *mb;
+
+ if (!vds)
+ return ERR_PTR(-EINVAL);
+
+ mb = _vds_get_txbuf(vds);
+
+ if (IS_ERR(mb) && PTR_ERR(mb) == -EAGAIN && timeout) {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ timeout = msecs_to_jiffies(timeout);
+ add_wait_queue(&vds->sendq, &wait);
+ for (;;) {
+ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+ timeout);
+ if (!timeout) {
+ mb = ERR_PTR(-ETIMEDOUT);
+ break;
+ }
+
+ if (signal_pending(current)) {
+ mb = ERR_PTR(-ERESTARTSYS);
+ break;
+ }
+
+ mb = _vds_get_txbuf(vds);
+ if (PTR_ERR(mb) != -EAGAIN)
+ break;
+ }
+ remove_wait_queue(&vds->sendq, &wait);
+ }
+
+ if (IS_ERR(mb))
+ return mb;
+
+ BUG_ON(!mb);
+
+ /* reset and reserve space for message header */
+ mb_reset(mb);
+ mb_put_data(mb, sizeof(struct tipc_msg_hdr));
+
+ return mb;
+}
+
+static int vds_queue_txbuf(struct tipc_virtio_dev *vds,
+ struct tipc_msg_buf *mb)
+{
+ int err;
+ struct scatterlist sg;
+ bool need_notify = false;
+
+ if (!vds)
+ return -EINVAL;
+
+ mutex_lock(&vds->lock);
+ if (vds->state == VDS_ONLINE) {
+ sg_init_one(&sg, mb->buf_va, mb->wpos);
+ err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL);
+ need_notify = virtqueue_kick_prepare(vds->txvq);
+ } else {
+ err = -ENODEV;
+ }
+ mutex_unlock(&vds->lock);
+
+ if (need_notify)
+ virtqueue_notify(vds->txvq);
+
+ return err;
+}
+
+static int vds_add_channel(struct tipc_virtio_dev *vds,
+ struct tipc_chan *chan)
+{
+ int ret;
+
+ mutex_lock(&vds->lock);
+ if (vds->state == VDS_ONLINE) {
+ ret = idr_alloc(&vds->addr_idr, chan,
+ TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1,
+ GFP_KERNEL);
+ if (ret > 0) {
+ chan->local = ret;
+ kref_get(&chan->refcount);
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ mutex_unlock(&vds->lock);
+
+ return ret;
+}
+
+static void vds_del_channel(struct tipc_virtio_dev *vds,
+ struct tipc_chan *chan)
+{
+ mutex_lock(&vds->lock);
+ if (chan->local) {
+ idr_remove(&vds->addr_idr, chan->local);
+ chan->local = 0;
+ chan->remote = 0;
+ kref_put(&chan->refcount, _free_chan);
+ }
+ mutex_unlock(&vds->lock);
+}
+
+static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds,
+ u32 addr)
+{
+ int id;
+ struct tipc_chan *chan = NULL;
+
+ mutex_lock(&vds->lock);
+ if (addr == TIPC_ANY_ADDR) {
+ id = idr_for_each(&vds->addr_idr, _match_any, NULL);
+ if (id > 0)
+ chan = idr_find(&vds->addr_idr, id);
+ } else {
+ chan = idr_find(&vds->addr_idr, addr);
+ }
+ if (chan)
+ kref_get(&chan->refcount);
+ mutex_unlock(&vds->lock);
+
+ return chan;
+}
+
+static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds,
+ const struct tipc_chan_ops *ops,
+ void *ops_arg)
+{
+ int ret;
+ struct tipc_chan *chan = NULL;
+
+ if (!vds)
+ return ERR_PTR(-ENOENT);
+
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return ERR_PTR(-ENOMEM);
+
+ kref_get(&vds->refcount);
+ chan->vds = vds;
+ chan->ops = ops;
+ chan->ops_arg = ops_arg;
+ mutex_init(&chan->lock);
+ kref_init(&chan->refcount);
+ chan->state = TIPC_DISCONNECTED;
+
+ ret = vds_add_channel(vds, chan);
+ if (ret) {
+ kfree(chan);
+ kref_put(&vds->refcount, _free_vds);
+ return ERR_PTR(ret);
+ }
+
+ return chan;
+}
+
+static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst)
+{
+ struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr));
+
+ hdr->src = src;
+ hdr->dst = dst;
+ hdr->len = mb_avail_data(mb);
+ hdr->flags = 0;
+ hdr->reserved = 0;
+}
+
+/*****************************************************************************/
+
+struct tipc_chan *tipc_create_channel(struct device *dev,
+ const struct tipc_chan_ops *ops,
+ void *ops_arg)
+{
+ struct virtio_device *vd;
+ struct tipc_chan *chan;
+ struct tipc_virtio_dev *vds;
+
+ mutex_lock(&tipc_devices_lock);
+ if (dev) {
+ vd = container_of(dev, struct virtio_device, dev);
+ } else {
+ vd = default_vdev;
+ if (!vd) {
+ mutex_unlock(&tipc_devices_lock);
+ return ERR_PTR(-ENOENT);
+ }
+ }
+ vds = vd->priv;
+ kref_get(&vds->refcount);
+ mutex_unlock(&tipc_devices_lock);
+
+ chan = vds_create_channel(vds, ops, ops_arg);
+ kref_put(&vds->refcount, _free_vds);
+ return chan;
+}
+EXPORT_SYMBOL(tipc_create_channel);
+
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan)
+{
+ return vds_alloc_msg_buf(chan->vds);
+}
+EXPORT_SYMBOL(tipc_chan_get_rxbuf);
+
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+ vds_free_msg_buf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_rxbuf);
+
+struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan,
+ long timeout)
+{
+ return vds_get_txbuf(chan->vds, timeout);
+}
+EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout);
+
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+ vds_put_txbuf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_txbuf);
+
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+ int err;
+
+ mutex_lock(&chan->lock);
+ switch (chan->state) {
+ case TIPC_CONNECTED:
+ fill_msg_hdr(mb, chan->local, chan->remote);
+ err = vds_queue_txbuf(chan->vds, mb);
+ if (err) {
+ /* this should never happen */
+ pr_err("%s: failed to queue tx buffer (%d)\n",
+ __func__, err);
+ }
+ break;
+ case TIPC_DISCONNECTED:
+ case TIPC_CONNECTING:
+ err = -ENOTCONN;
+ break;
+ case TIPC_STALE:
+ err = -ESHUTDOWN;
+ break;
+ default:
+ err = -EBADFD;
+ pr_err("%s: unexpected channel state %d\n",
+ __func__, chan->state);
+ }
+ mutex_unlock(&chan->lock);
+ return err;
+}
+EXPORT_SYMBOL(tipc_chan_queue_msg);
+
+
+int tipc_chan_connect(struct tipc_chan *chan, const char *name)
+{
+ int err;
+ struct tipc_ctrl_msg *msg;
+ struct tipc_conn_req_body *body;
+ struct tipc_msg_buf *txbuf;
+
+ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+ if (IS_ERR(txbuf))
+ return PTR_ERR(txbuf);
+
+ /* reserve space for connection request control message */
+ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+ body = (struct tipc_conn_req_body *)msg->body;
+
+ /* fill message */
+ msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ;
+ msg->body_len = sizeof(*body);
+
+ strncpy(body->name, name, sizeof(body->name));
+ body->name[sizeof(body->name)-1] = '\0';
+
+ mutex_lock(&chan->lock);
+ switch (chan->state) {
+ case TIPC_DISCONNECTED:
+ /* save service name we are connecting to */
+ strcpy(chan->srv_name, body->name);
+
+ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+ err = vds_queue_txbuf(chan->vds, txbuf);
+ if (err) {
+ /* this should never happen */
+ pr_err("%s: failed to queue tx buffer (%d)\n",
+ __func__, err);
+ } else {
+ chan->state = TIPC_CONNECTING;
+ txbuf = NULL; /* prevents discarding buffer */
+ }
+ break;
+ case TIPC_CONNECTED:
+ case TIPC_CONNECTING:
+ /* check if we are trying to connect to the same service */
+ if (strcmp(chan->srv_name, body->name) == 0)
+ err = 0;
+ else
+ if (chan->state == TIPC_CONNECTING)
+ err = -EALREADY; /* in progress */
+ else
+ err = -EISCONN; /* already connected */
+ break;
+
+ case TIPC_STALE:
+ err = -ESHUTDOWN;
+ break;
+ default:
+ err = -EBADFD;
+ pr_err("%s: unexpected channel state %d\n",
+ __func__, chan->state);
+ break;
+ }
+ mutex_unlock(&chan->lock);
+
+ if (txbuf)
+ tipc_chan_put_txbuf(chan, txbuf); /* discard it */
+
+ return err;
+}
+EXPORT_SYMBOL(tipc_chan_connect);
+
+int tipc_chan_shutdown(struct tipc_chan *chan)
+{
+ int err;
+ struct tipc_ctrl_msg *msg;
+ struct tipc_disc_req_body *body;
+ struct tipc_msg_buf *txbuf = NULL;
+
+ /* get tx buffer */
+ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+ if (IS_ERR(txbuf))
+ return PTR_ERR(txbuf);
+
+ mutex_lock(&chan->lock);
+ if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) {
+ /* reserve space for disconnect request control message */
+ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+ body = (struct tipc_disc_req_body *)msg->body;
+
+ msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ;
+ msg->body_len = sizeof(*body);
+ body->target = chan->remote;
+
+ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+ err = vds_queue_txbuf(chan->vds, txbuf);
+ if (err) {
+ /* this should never happen */
+ pr_err("%s: failed to queue tx buffer (%d)\n",
+ __func__, err);
+ }
+ } else {
+ err = -ENOTCONN;
+ }
+ chan->state = TIPC_STALE;
+ mutex_unlock(&chan->lock);
+
+ if (err) {
+ /* release buffer */
+ tipc_chan_put_txbuf(chan, txbuf);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(tipc_chan_shutdown);
+
+void tipc_chan_destroy(struct tipc_chan *chan)
+{
+ mutex_lock(&chan->lock);
+ if (chan->vds) {
+ vds_del_channel(chan->vds, chan);
+ kref_put(&chan->vds->refcount, _free_vds);
+ chan->vds = NULL;
+ }
+ mutex_unlock(&chan->lock);
+ kref_put(&chan->refcount, _free_chan);
+}
+EXPORT_SYMBOL(tipc_chan_destroy);
+
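+/*
+ * A minimal sketch of how an in-kernel client uses the API exported above
+ * (the callbacks, context and service name are illustrative only; they are
+ * not defined by this driver):
+ *
+ *	static const struct tipc_chan_ops my_ops = {
+ *		.handle_msg	= my_handle_msg,
+ *		.handle_event	= my_handle_event,
+ *	};
+ *
+ *	chan = tipc_create_channel(NULL, &my_ops, my_ctx);
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *	err = tipc_chan_connect(chan, "com.example.srv");
+ *	...
+ *	mb = tipc_chan_get_txbuf_timeout(chan, TXBUF_TIMEOUT);
+ *	if (!IS_ERR(mb)) {
+ *		memcpy(mb_put_data(mb, len), data, len);
+ *		err = tipc_chan_queue_msg(chan, mb);
+ *	}
+ */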
+/***************************************************************************/
+
+struct tipc_dn_chan {
+ int state;
+ struct mutex lock; /* protects rx_msg_queue list and channel state */
+ struct tipc_chan *chan;
+ wait_queue_head_t readq;
+ struct completion reply_comp;
+ struct list_head rx_msg_queue;
+};
+
+static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
+ msecs_to_jiffies(timeout));
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dn->lock);
+ if (!ret) {
+ /* no reply from remote */
+ dn->state = TIPC_STALE;
+ ret = -ETIMEDOUT;
+ } else {
+ /* got reply */
+ if (dn->state == TIPC_CONNECTED)
+ ret = 0;
+ else if (dn->state == TIPC_DISCONNECTED) {
+ if (!list_empty(&dn->rx_msg_queue))
+ ret = 0;
+ else
+ ret = -ENOTCONN;
+ } else {
+ ret = -EIO;
+ }
+ }
+ mutex_unlock(&dn->lock);
+
+ return ret;
+}
+
+struct tipc_msg_buf *dn_handle_msg(void *data, struct tipc_msg_buf *rxbuf)
+{
+ struct tipc_dn_chan *dn = data;
+ struct tipc_msg_buf *newbuf = rxbuf;
+
+ mutex_lock(&dn->lock);
+ if (dn->state == TIPC_CONNECTED) {
+ /* get new buffer */
+ newbuf = tipc_chan_get_rxbuf(dn->chan);
+ if (newbuf) {
+ /* queue an old buffer and return a new one */
+ list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
+ wake_up_interruptible(&dn->readq);
+ } else {
+ /*
+ * return an old buffer effectively discarding
+ * incoming message
+ */
+ pr_err("%s: discard incoming message\n", __func__);
+ newbuf = rxbuf;
+ }
+ }
+ mutex_unlock(&dn->lock);
+
+ return newbuf;
+}
+
+static void dn_connected(struct tipc_dn_chan *dn)
+{
+ mutex_lock(&dn->lock);
+ dn->state = TIPC_CONNECTED;
+
+ /* complete all pending */
+ complete(&dn->reply_comp);
+
+ mutex_unlock(&dn->lock);
+}
+
+static void dn_disconnected(struct tipc_dn_chan *dn)
+{
+ mutex_lock(&dn->lock);
+ dn->state = TIPC_DISCONNECTED;
+
+ /* complete all pending */
+ complete(&dn->reply_comp);
+
+ /* wakeup all readers */
+ wake_up_interruptible_all(&dn->readq);
+
+ mutex_unlock(&dn->lock);
+}
+
+static void dn_shutdown(struct tipc_dn_chan *dn)
+{
+ mutex_lock(&dn->lock);
+
+ /* set state to STALE */
+ dn->state = TIPC_STALE;
+
+ /* complete all pending */
+ complete(&dn->reply_comp);
+
+ /* wakeup all readers */
+ wake_up_interruptible_all(&dn->readq);
+
+ mutex_unlock(&dn->lock);
+}
+
+static void dn_handle_event(void *data, int event)
+{
+ struct tipc_dn_chan *dn = data;
+
+ switch (event) {
+ case TIPC_CHANNEL_SHUTDOWN:
+ dn_shutdown(dn);
+ break;
+
+ case TIPC_CHANNEL_DISCONNECTED:
+ dn_disconnected(dn);
+ break;
+
+ case TIPC_CHANNEL_CONNECTED:
+ dn_connected(dn);
+ break;
+
+ default:
+ pr_err("%s: unhandled event %d\n", __func__, event);
+ break;
+ }
+}
+
+static struct tipc_chan_ops _dn_ops = {
+ .handle_msg = dn_handle_msg,
+ .handle_event = dn_handle_event,
+};
+
+#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev)
+#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node)
+
+static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn)
+{
+ int ret;
+ struct tipc_virtio_dev *vds = NULL;
+
+ mutex_lock(&tipc_devices_lock);
+ ret = idr_for_each(&tipc_devices, _match_data, cdn);
+ if (ret) {
+ vds = cdn_to_vds(cdn);
+ kref_get(&vds->refcount);
+ }
+ mutex_unlock(&tipc_devices_lock);
+ return vds;
+}
+
+static int tipc_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct tipc_virtio_dev *vds;
+ struct tipc_dn_chan *dn;
+ struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev);
+
+ vds = _dn_lookup_vds(cdn);
+ if (!vds) {
+ ret = -ENOENT;
+ goto err_vds_lookup;
+ }
+
+ dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+ if (!dn) {
+ ret = -ENOMEM;
+ goto err_alloc_chan;
+ }
+
+ mutex_init(&dn->lock);
+ init_waitqueue_head(&dn->readq);
+ init_completion(&dn->reply_comp);
+ INIT_LIST_HEAD(&dn->rx_msg_queue);
+
+ dn->state = TIPC_DISCONNECTED;
+
+ dn->chan = vds_create_channel(vds, &_dn_ops, dn);
+ if (IS_ERR(dn->chan)) {
+ ret = PTR_ERR(dn->chan);
+ goto err_create_chan;
+ }
+
+ filp->private_data = dn;
+ kref_put(&vds->refcount, _free_vds);
+ return 0;
+
+err_create_chan:
+ kfree(dn);
+err_alloc_chan:
+ kref_put(&vds->refcount, _free_vds);
+err_vds_lookup:
+ return ret;
+}
+
+
+static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name)
+{
+ int err;
+ char name[MAX_SRV_NAME_LEN];
+
+ /* copy in service name from user space */
+ err = strncpy_from_user(name, usr_name, sizeof(name));
+ if (err < 0) {
+ pr_err("%s: strncpy_from_user (%p) failed (%d)\n",
+ __func__, usr_name, err);
+ return err;
+ }
+ name[sizeof(name)-1] = '\0';
+
+ /* send connect request */
+ err = tipc_chan_connect(dn->chan, name);
+ if (err)
+ return err;
+
+ /* and wait for reply */
+ return dn_wait_for_reply(dn, REPLY_TIMEOUT);
+}
+
+static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct tipc_dn_chan *dn = filp->private_data;
+
+ if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TIPC_IOC_CONNECT:
+ ret = dn_connect_ioctl(dn, (char __user *)arg);
+ break;
+ default:
+ pr_warn("%s: Unhandled ioctl cmd: 0x%x\n",
+ __func__, cmd);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+#if defined(CONFIG_COMPAT)
+static long tipc_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct tipc_dn_chan *dn = filp->private_data;
+ void __user *user_req = compat_ptr(arg);
+
+ if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TIPC_IOC_CONNECT_COMPAT:
+ ret = dn_connect_ioctl(dn, user_req);
+ break;
+ default:
+ pr_warn("%s: Unhandled ioctl cmd: 0x%x\n",
+ __func__, cmd);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+#endif
+
+static inline bool _got_rx(struct tipc_dn_chan *dn)
+{
+ if (dn->state != TIPC_CONNECTED)
+ return true;
+
+ if (!list_empty(&dn->rx_msg_queue))
+ return true;
+
+ return false;
+}
+
+static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ ssize_t ret;
+ size_t len;
+ struct tipc_msg_buf *mb;
+ struct file *filp = iocb->ki_filp;
+ struct tipc_dn_chan *dn = filp->private_data;
+
+ mutex_lock(&dn->lock);
+
+ while (list_empty(&dn->rx_msg_queue)) {
+ if (dn->state != TIPC_CONNECTED) {
+ if (dn->state == TIPC_CONNECTING)
+ ret = -ENOTCONN;
+ else if (dn->state == TIPC_DISCONNECTED)
+ ret = -ENOTCONN;
+ else if (dn->state == TIPC_STALE)
+ ret = -ESHUTDOWN;
+ else
+ ret = -EBADFD;
+ goto out;
+ }
+
+ mutex_unlock(&dn->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(dn->readq, _got_rx(dn)))
+ return -ERESTARTSYS;
+
+ mutex_lock(&dn->lock);
+ }
+
+ mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);
+
+ len = mb_avail_data(mb);
+ if (len > iov_iter_count(iter)) {
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len;
+ list_del(&mb->node);
+ tipc_chan_put_rxbuf(dn->chan, mb);
+
+out:
+ mutex_unlock(&dn->lock);
+ return ret;
+}
+
+static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ ssize_t ret;
+ size_t len;
+ long timeout = TXBUF_TIMEOUT;
+ struct tipc_msg_buf *txbuf = NULL;
+ struct file *filp = iocb->ki_filp;
+ struct tipc_dn_chan *dn = filp->private_data;
+
+ if (filp->f_flags & O_NONBLOCK)
+ timeout = 0;
+
+ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
+ if (IS_ERR(txbuf))
+ return PTR_ERR(txbuf);
+
+ /* message length */
+ len = iov_iter_count(iter);
+
+ /* check available space */
+ if (len > mb_avail_space(txbuf)) {
+ ret = -EMSGSIZE;
+ goto err_out;
+ }
+
+ /* copy in message data */
+ if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) {
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ /* queue message */
+ ret = tipc_chan_queue_msg(dn->chan, txbuf);
+ if (ret)
+ goto err_out;
+
+ return len;
+
+err_out:
+ tipc_chan_put_txbuf(dn->chan, txbuf);
+ return ret;
+}
+
+static unsigned int tipc_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct tipc_dn_chan *dn = filp->private_data;
+
+ mutex_lock(&dn->lock);
+
+ poll_wait(filp, &dn->readq, wait);
+
+ /* Writes always succeed for now */
+ mask |= POLLOUT | POLLWRNORM;
+
+ if (!list_empty(&dn->rx_msg_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (dn->state != TIPC_CONNECTED)
+ mask |= POLLERR;
+
+ mutex_unlock(&dn->lock);
+ return mask;
+}
+
+
+static int tipc_release(struct inode *inode, struct file *filp)
+{
+ struct tipc_dn_chan *dn = filp->private_data;
+
+ dn_shutdown(dn);
+
+ /* free all pending buffers */
+ _free_msg_buf_list(&dn->rx_msg_queue);
+
+ /* shutdown channel */
+ tipc_chan_shutdown(dn->chan);
+
+ /* and destroy it */
+ tipc_chan_destroy(dn->chan);
+
+ kfree(dn);
+
+ return 0;
+}
+
+static const struct file_operations tipc_fops = {
+ .open = tipc_open,
+ .release = tipc_release,
+ .unlocked_ioctl = tipc_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = tipc_compat_ioctl,
+#endif
+ .read_iter = tipc_read_iter,
+ .write_iter = tipc_write_iter,
+ .poll = tipc_poll,
+ .owner = THIS_MODULE,
+};
+
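+/*
+ * Illustrative userspace sequence for this character device (the node and
+ * service names are examples; the node appears as /dev/trusty-ipc-<name>,
+ * where <name> comes from the virtio device config):
+ *
+ *	int fd = open("/dev/trusty-ipc-dev0", O_RDWR);
+ *	ioctl(fd, TIPC_IOC_CONNECT, "com.example.srv");
+ *	write(fd, req, req_len);	queue a message to the service
+ *	read(fd, rsp, sizeof(rsp));	block until a reply arrives
+ */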
+/*****************************************************************************/
+
+static void chan_trigger_event(struct tipc_chan *chan, int event)
+{
+ if (!event)
+ return;
+
+ chan->ops->handle_event(chan->ops_arg, event);
+}
+
+static void _cleanup_vq(struct virtqueue *vq)
+{
+ struct tipc_msg_buf *mb;
+
+ while ((mb = virtqueue_detach_unused_buf(vq)) != NULL)
+ _free_msg_buf(mb);
+}
+
+static int _create_cdev_node(struct device *parent,
+ struct tipc_cdev_node *cdn,
+ const char *name)
+{
+ int ret;
+ dev_t devt;
+
+ if (!name) {
+ dev_dbg(parent, "%s: cdev name has to be provided\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* allocate minor; idr_alloc() treats 'end' as exclusive */
+ ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES, GFP_KERNEL);
+ if (ret < 0) {
+ dev_dbg(parent, "%s: failed (%d) to get id\n",
+ __func__, ret);
+ return ret;
+ }
+
+ cdn->minor = ret;
+ cdev_init(&cdn->cdev, &tipc_fops);
+ cdn->cdev.owner = THIS_MODULE;
+
+ /* Add character device */
+ devt = MKDEV(tipc_major, cdn->minor);
+ ret = cdev_add(&cdn->cdev, devt, 1);
+ if (ret) {
+ dev_dbg(parent, "%s: cdev_add failed (%d)\n",
+ __func__, ret);
+ goto err_add_cdev;
+ }
+
+ /* Create a device node */
+ cdn->dev = device_create(tipc_class, parent,
+ devt, NULL, "trusty-ipc-%s", name);
+ if (IS_ERR(cdn->dev)) {
+ ret = PTR_ERR(cdn->dev);
+ dev_dbg(parent, "%s: device_create failed: %d\n",
+ __func__, ret);
+ goto err_device_create;
+ }
+
+ return 0;
+
+err_device_create:
+ cdn->dev = NULL;
+ cdev_del(&cdn->cdev);
+err_add_cdev:
+ idr_remove(&tipc_devices, cdn->minor);
+ return ret;
+}
+
+static void create_cdev_node(struct tipc_virtio_dev *vds,
+ struct tipc_cdev_node *cdn)
+{
+ int err;
+
+ mutex_lock(&tipc_devices_lock);
+
+ if (!default_vdev) {
+ kref_get(&vds->refcount);
+ default_vdev = vds->vdev;
+ }
+
+ if (vds->cdev_name[0] && !cdn->dev) {
+ kref_get(&vds->refcount);
+ err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name);
+ if (err) {
+ dev_err(&vds->vdev->dev,
+ "failed (%d) to create cdev node\n", err);
+ kref_put(&vds->refcount, _free_vds);
+ }
+ }
+ mutex_unlock(&tipc_devices_lock);
+}
+
+static void destroy_cdev_node(struct tipc_virtio_dev *vds,
+ struct tipc_cdev_node *cdn)
+{
+ mutex_lock(&tipc_devices_lock);
+ if (cdn->dev) {
+ device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor));
+ cdev_del(&cdn->cdev);
+ idr_remove(&tipc_devices, cdn->minor);
+ cdn->dev = NULL;
+ kref_put(&vds->refcount, _free_vds);
+ }
+
+ if (default_vdev == vds->vdev) {
+ default_vdev = NULL;
+ kref_put(&vds->refcount, _free_vds);
+ }
+
+ mutex_unlock(&tipc_devices_lock);
+}
+
+static void _go_online(struct tipc_virtio_dev *vds)
+{
+ mutex_lock(&vds->lock);
+ if (vds->state == VDS_OFFLINE)
+ vds->state = VDS_ONLINE;
+ mutex_unlock(&vds->lock);
+
+ create_cdev_node(vds, &vds->cdev_node);
+
+ dev_info(&vds->vdev->dev, "is online\n");
+}
+
+static void _go_offline(struct tipc_virtio_dev *vds)
+{
+ struct tipc_chan *chan;
+
+ /* change state to OFFLINE */
+ mutex_lock(&vds->lock);
+ if (vds->state != VDS_ONLINE) {
+ mutex_unlock(&vds->lock);
+ return;
+ }
+ vds->state = VDS_OFFLINE;
+ mutex_unlock(&vds->lock);
+
+ /* wakeup all waiters */
+ wake_up_interruptible_all(&vds->sendq);
+
+ /* shutdown all channels */
+ while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) {
+ mutex_lock(&chan->lock);
+ chan->state = TIPC_STALE;
+ chan->remote = 0;
+ chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN);
+ mutex_unlock(&chan->lock);
+ kref_put(&chan->refcount, _free_chan);
+ }
+
+ /* shutdown device node */
+ destroy_cdev_node(vds, &vds->cdev_node);
+
+ dev_info(&vds->vdev->dev, "is offline\n");
+}
+
+static void _handle_conn_rsp(struct tipc_virtio_dev *vds,
+ struct tipc_conn_rsp_body *rsp, size_t len)
+{
+ struct tipc_chan *chan;
+
+ if (sizeof(*rsp) != len) {
+ dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n",
+ __func__, len);
+ return;
+ }
+
+ dev_dbg(&vds->vdev->dev,
+ "%s: connection response: for addr 0x%x: status %d remote addr 0x%x\n",
+ __func__, rsp->target, rsp->status, rsp->remote);
+
+ /* Lookup channel */
+ chan = vds_lookup_channel(vds, rsp->target);
+ if (chan) {
+ mutex_lock(&chan->lock);
+ if (chan->state == TIPC_CONNECTING) {
+ if (!rsp->status) {
+ chan->state = TIPC_CONNECTED;
+ chan->remote = rsp->remote;
+ chan->max_msg_cnt = rsp->max_msg_cnt;
+ chan->max_msg_size = rsp->max_msg_size;
+ chan_trigger_event(chan,
+ TIPC_CHANNEL_CONNECTED);
+ } else {
+ chan->state = TIPC_DISCONNECTED;
+ chan->remote = 0;
+ chan_trigger_event(chan,
+ TIPC_CHANNEL_DISCONNECTED);
+ }
+ }
+ mutex_unlock(&chan->lock);
+ kref_put(&chan->refcount, _free_chan);
+ }
+}
+
+static void _handle_disc_req(struct tipc_virtio_dev *vds,
+ struct tipc_disc_req_body *req, size_t len)
+{
+ struct tipc_chan *chan;
+
+ if (sizeof(*req) != len) {
+ dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n",
+ __func__, len);
+ return;
+ }
+
+ dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n",
+ __func__, req->target);
+
+ chan = vds_lookup_channel(vds, req->target);
+ if (chan) {
+ mutex_lock(&chan->lock);
+ if (chan->state == TIPC_CONNECTED ||
+ chan->state == TIPC_CONNECTING) {
+ chan->state = TIPC_DISCONNECTED;
+ chan->remote = 0;
+ chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED);
+ }
+ mutex_unlock(&chan->lock);
+ kref_put(&chan->refcount, _free_chan);
+ }
+}
+
+static void _handle_ctrl_msg(struct tipc_virtio_dev *vds,
+ void *data, int len, u32 src)
+{
+ struct tipc_ctrl_msg *msg = data;
+
+ if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) {
+ dev_err(&vds->vdev->dev,
+ "%s: Invalid message length (%d vs. %d)\n",
+ __func__, (int)(sizeof(*msg) + msg->body_len), len);
+ return;
+ }
+
+ dev_dbg(&vds->vdev->dev,
+ "%s: Incoming ctrl message: src 0x%x type %d len %d\n",
+ __func__, src, msg->type, msg->body_len);
+
+ switch (msg->type) {
+ case TIPC_CTRL_MSGTYPE_GO_ONLINE:
+ _go_online(vds);
+ break;
+
+ case TIPC_CTRL_MSGTYPE_GO_OFFLINE:
+ _go_offline(vds);
+ break;
+
+ case TIPC_CTRL_MSGTYPE_CONN_RSP:
+ _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body,
+ msg->body_len);
+ break;
+
+ case TIPC_CTRL_MSGTYPE_DISC_REQ:
+ _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body,
+ msg->body_len);
+ break;
+
+ default:
+ dev_warn(&vds->vdev->dev,
+ "%s: Unexpected message type: %d\n",
+ __func__, msg->type);
+ }
+}
+
+static int _handle_rxbuf(struct tipc_virtio_dev *vds,
+ struct tipc_msg_buf *rxbuf, size_t rxlen)
+{
+ int err;
+ struct scatterlist sg;
+ struct tipc_msg_hdr *msg;
+ struct device *dev = &vds->vdev->dev;
+
+ /* message sanity check */
+ if (rxlen > rxbuf->buf_sz) {
+ dev_warn(dev, "inbound msg is too big: %zd\n", rxlen);
+ goto drop_it;
+ }
+
+ if (rxlen < sizeof(*msg)) {
+ dev_warn(dev, "inbound msg is too short: %zd\n", rxlen);
+ goto drop_it;
+ }
+
+ /* reset buffer and put data */
+ mb_reset(rxbuf);
+ mb_put_data(rxbuf, rxlen);
+
+ /* get message header */
+ msg = mb_get_data(rxbuf, sizeof(*msg));
+ if (mb_avail_data(rxbuf) != msg->len) {
+ dev_warn(dev, "inbound msg length mismatch: (%d vs. %d)\n",
+ (uint) mb_avail_data(rxbuf), (uint)msg->len);
+ goto drop_it;
+ }
+
+ dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d\n",
+ msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+
+ /* message directed to control endpoint is a special case */
+ if (msg->dst == TIPC_CTRL_ADDR) {
+ _handle_ctrl_msg(vds, msg->data, msg->len, msg->src);
+ } else {
+ struct tipc_chan *chan = NULL;
+ /* Lookup channel */
+ chan = vds_lookup_channel(vds, msg->dst);
+ if (chan) {
+ /* handle it */
+ rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf);
+ BUG_ON(!rxbuf);
+ kref_put(&chan->refcount, _free_chan);
+ }
+ }
+
+drop_it:
+ /* add the buffer back to the virtqueue */
+ sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz);
+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _rxvq_cb(struct virtqueue *rxvq)
+{
+ unsigned int len;
+ struct tipc_msg_buf *mb;
+ unsigned int msg_cnt = 0;
+ struct tipc_virtio_dev *vds = rxvq->vdev->priv;
+
+ while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) {
+ if (_handle_rxbuf(vds, mb, len))
+ break;
+ msg_cnt++;
+ }
+
+ /* tell the other side that we added rx buffers */
+ if (msg_cnt)
+ virtqueue_kick(rxvq);
+}
+
+static void _txvq_cb(struct virtqueue *txvq)
+{
+ unsigned int len;
+ struct tipc_msg_buf *mb;
+ bool need_wakeup = false;
+ struct tipc_virtio_dev *vds = txvq->vdev->priv;
+
+ dev_dbg(&txvq->vdev->dev, "%s\n", __func__);
+
+ /* detach all buffers */
+ mutex_lock(&vds->lock);
+ while ((mb = virtqueue_get_buf(txvq, &len)) != NULL)
+ need_wakeup |= _put_txbuf_locked(vds, mb);
+ mutex_unlock(&vds->lock);
+
+ if (need_wakeup) {
+ /* wake up potential senders waiting for a tx buffer */
+ wake_up_interruptible_all(&vds->sendq);
+ }
+}
+
+static int tipc_virtio_probe(struct virtio_device *vdev)
+{
+ int err, i;
+ struct tipc_virtio_dev *vds;
+ struct tipc_dev_config config;
+ struct virtqueue *vqs[2];
+ vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb};
+ const char *vq_names[] = { "rx", "tx" };
+
+ dev_dbg(&vdev->dev, "%s:\n", __func__);
+
+ vds = kzalloc(sizeof(*vds), GFP_KERNEL);
+ if (!vds)
+ return -ENOMEM;
+
+ vds->vdev = vdev;
+
+ mutex_init(&vds->lock);
+ kref_init(&vds->refcount);
+ init_waitqueue_head(&vds->sendq);
+ INIT_LIST_HEAD(&vds->free_buf_list);
+ idr_init(&vds->addr_idr);
+
+ /* set default max message size and alignment */
+ memset(&config, 0, sizeof(config));
+ config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE;
+ config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN;
+
+ /* get configuration if present */
+ vdev->config->get(vdev, 0, &config, sizeof(config));
+
+ /* copy dev name */
+ strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name));
+ vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0';
+
+ /* find the virtqueues (rx and tx, in this order) */
+ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names);
+ if (err)
+ goto err_find_vqs;
+
+ vds->rxvq = vqs[0];
+ vds->txvq = vqs[1];
+
+ /* save max buffer size and count */
+ vds->msg_buf_max_sz = config.msg_buf_max_size;
+ vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq);
+
+ /* set up the receive buffers */
+ for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) {
+ struct scatterlist sg;
+ struct tipc_msg_buf *rxbuf;
+
+ rxbuf = _alloc_msg_buf(vds->msg_buf_max_sz);
+ if (!rxbuf) {
+ dev_err(&vdev->dev, "failed to allocate rx buffer\n");
+ err = -ENOMEM;
+ goto err_free_rx_buffers;
+ }
+
+ sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz);
+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
+ WARN_ON(err); /* sanity check; this can't really happen */
+ }
+
+ vdev->priv = vds;
+ vds->state = VDS_OFFLINE;
+
+ dev_dbg(&vdev->dev, "%s: done\n", __func__);
+ return 0;
+
+err_free_rx_buffers:
+ _cleanup_vq(vds->rxvq);
+err_find_vqs:
+ kref_put(&vds->refcount, _free_vds);
+ return err;
+}
+
+static void tipc_virtio_remove(struct virtio_device *vdev)
+{
+ struct tipc_virtio_dev *vds = vdev->priv;
+
+ _go_offline(vds);
+
+ mutex_lock(&vds->lock);
+ vds->state = VDS_DEAD;
+ vds->vdev = NULL;
+ mutex_unlock(&vds->lock);
+
+ vdev->config->reset(vdev);
+
+ idr_destroy(&vds->addr_idr);
+
+ _cleanup_vq(vds->rxvq);
+ _cleanup_vq(vds->txvq);
+ _free_msg_buf_list(&vds->free_buf_list);
+
+ vdev->config->del_vqs(vds->vdev);
+
+ kref_put(&vds->refcount, _free_vds);
+}
+
+static struct virtio_device_id tipc_virtio_id_table[] = {
+ { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+ 0,
+};
+
+static struct virtio_driver virtio_tipc_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = tipc_virtio_id_table,
+ .probe = tipc_virtio_probe,
+ .remove = tipc_virtio_remove,
+};
+
+static int __init tipc_init(void)
+{
+ int ret;
+ dev_t dev;
+
+ ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME);
+ if (ret) {
+ pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ tipc_major = MAJOR(dev);
+ tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
+ if (IS_ERR(tipc_class)) {
+ ret = PTR_ERR(tipc_class);
+ pr_err("%s: class_create failed: %d\n", __func__, ret);
+ goto err_class_create;
+ }
+
+ ret = register_virtio_driver(&virtio_tipc_driver);
+ if (ret) {
+ pr_err("failed to register virtio driver: %d\n", ret);
+ goto err_register_virtio_drv;
+ }
+
+ return 0;
+
+err_register_virtio_drv:
+ class_destroy(tipc_class);
+
+err_class_create:
+ unregister_chrdev_region(dev, MAX_DEVICES);
+ return ret;
+}
+
+static void __exit tipc_exit(void)
+{
+ unregister_virtio_driver(&virtio_tipc_driver);
+ class_destroy(tipc_class);
+ unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES);
+}
+
+/* We need to init this early */
+subsys_initcall(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DEVICE_TABLE(virtio, tipc_virtio_id_table);
+MODULE_DESCRIPTION("Trusty IPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c
new file mode 100644
index 000000000000..1a8215849010
--- /dev/null
+++ b/drivers/trusty/trusty-irq.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/trusty/trusty.h>
+
+struct trusty_irq {
+ struct trusty_irq_state *is;
+ struct hlist_node node;
+ unsigned int irq;
+ bool percpu;
+ bool enable;
+ struct trusty_irq __percpu *percpu_ptr;
+};
+
+struct trusty_irq_irqset {
+ struct hlist_head pending;
+ struct hlist_head inactive;
+};
+
+struct trusty_irq_state {
+ struct device *dev;
+ struct device *trusty_dev;
+ struct trusty_irq_irqset normal_irqs;
+ spinlock_t normal_irqs_lock;
+ struct trusty_irq_irqset __percpu *percpu_irqs;
+ struct notifier_block trusty_call_notifier;
+ struct hlist_node cpuhp_node;
+};
+
+static int trusty_irq_cpuhp_slot = -1;
+
+static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is,
+ struct trusty_irq_irqset *irqset,
+ bool percpu)
+{
+ struct hlist_node *n;
+ struct trusty_irq *trusty_irq;
+
+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
+ dev_dbg(is->dev,
+ "%s: enable pending irq %d, percpu %d, cpu %d\n",
+ __func__, trusty_irq->irq, percpu, smp_processor_id());
+ if (percpu)
+ enable_percpu_irq(trusty_irq->irq, 0);
+ else
+ enable_irq(trusty_irq->irq);
+ hlist_del(&trusty_irq->node);
+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
+ }
+}
+
+static void trusty_irq_enable_irqset(struct trusty_irq_state *is,
+ struct trusty_irq_irqset *irqset)
+{
+ struct trusty_irq *trusty_irq;
+
+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
+ if (trusty_irq->enable) {
+ dev_warn(is->dev,
+ "%s: percpu irq %d already enabled, cpu %d\n",
+ __func__, trusty_irq->irq, smp_processor_id());
+ continue;
+ }
+ dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n",
+ __func__, trusty_irq->irq, smp_processor_id());
+ enable_percpu_irq(trusty_irq->irq, 0);
+ trusty_irq->enable = true;
+ }
+}
+
+static void trusty_irq_disable_irqset(struct trusty_irq_state *is,
+ struct trusty_irq_irqset *irqset)
+{
+ struct hlist_node *n;
+ struct trusty_irq *trusty_irq;
+
+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
+ if (!trusty_irq->enable) {
+ dev_warn(is->dev,
+ "irq %d already disabled, percpu %d, cpu %d\n",
+ trusty_irq->irq, trusty_irq->percpu,
+ smp_processor_id());
+ continue;
+ }
+ dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n",
+ __func__, trusty_irq->irq, trusty_irq->percpu,
+ smp_processor_id());
+ trusty_irq->enable = false;
+ if (trusty_irq->percpu)
+ disable_percpu_irq(trusty_irq->irq);
+ else
+ disable_irq_nosync(trusty_irq->irq);
+ }
+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
+ if (!trusty_irq->enable) {
+ dev_warn(is->dev,
+ "pending irq %d already disabled, percpu %d, cpu %d\n",
+ trusty_irq->irq, trusty_irq->percpu,
+ smp_processor_id());
+ }
+ dev_dbg(is->dev,
+ "%s: disable pending irq %d, percpu %d, cpu %d\n",
+ __func__, trusty_irq->irq, trusty_irq->percpu,
+ smp_processor_id());
+ trusty_irq->enable = false;
+ hlist_del(&trusty_irq->node);
+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
+ }
+}
+
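+/*
+ * Called from the TRUSTY_CALL_PREPARE notifier with interrupts disabled,
+ * just before a world switch: re-enable all pending Trusty interrupts so
+ * the secure side can observe and handle them while it runs.
+ */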
+static int trusty_irq_call_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct trusty_irq_state *is;
+
+ BUG_ON(!irqs_disabled());
+
+ if (action != TRUSTY_CALL_PREPARE)
+ return NOTIFY_DONE;
+
+ is = container_of(nb, struct trusty_irq_state, trusty_call_notifier);
+
+ spin_lock(&is->normal_irqs_lock);
+ trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false);
+ spin_unlock(&is->normal_irqs_lock);
+ trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true);
+
+ return NOTIFY_OK;
+}
+
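+/*
+ * Linux-side handler for interrupts routed to Trusty: mask the interrupt,
+ * move it to the matching "pending" set, and enqueue a nop so the core
+ * driver performs a world switch.  The pending interrupts are unmasked
+ * again by trusty_irq_call_notify() right before the SMC is issued.
+ */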
+irqreturn_t trusty_irq_handler(int irq, void *data)
+{
+ struct trusty_irq *trusty_irq = data;
+ struct trusty_irq_state *is = trusty_irq->is;
+ struct trusty_irq_irqset *irqset;
+
+ dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n",
+ __func__, irq, trusty_irq->percpu, smp_processor_id(),
+ trusty_irq->enable);
+
+ if (trusty_irq->percpu) {
+ disable_percpu_irq(irq);
+ irqset = this_cpu_ptr(is->percpu_irqs);
+ } else {
+ disable_irq_nosync(irq);
+ irqset = &is->normal_irqs;
+ }
+
+ spin_lock(&is->normal_irqs_lock);
+ if (trusty_irq->enable) {
+ hlist_del(&trusty_irq->node);
+ hlist_add_head(&trusty_irq->node, &irqset->pending);
+ }
+ spin_unlock(&is->normal_irqs_lock);
+
+ trusty_enqueue_nop(is->trusty_dev, NULL);
+
+ dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node)
+{
+ unsigned long irq_flags;
+ struct trusty_irq_state *is;
+
+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
+
+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
+
+ local_irq_save(irq_flags);
+ trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs));
+ local_irq_restore(irq_flags);
+
+ return 0;
+}
+
+static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node)
+{
+ unsigned long irq_flags;
+ struct trusty_irq_state *is;
+
+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
+
+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
+
+ local_irq_save(irq_flags);
+ trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs));
+ local_irq_restore(irq_flags);
+
+ return 0;
+}
+
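+/*
+ * Translate a Trusty interrupt id into a Linux virq.  If the device tree
+ * provides "interrupt-ranges" and "interrupt-templates" properties, the
+ * matching template is expanded into an interrupt specifier and mapped
+ * through the referenced irq domain; otherwise the id is returned as-is.
+ */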
+static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq)
+{
+ int ret;
+ int index;
+ u32 irq_pos;
+ u32 templ_idx;
+ u32 range_base;
+ u32 range_end;
+ struct of_phandle_args oirq;
+
+ /* check if "interrupt-ranges" property is present */
+ if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) {
+ /* fall back to the old behavior to stay backward compatible
+ * with systems that do not need IRQ domains.
+ */
+ return irq;
+ }
+
+ /* find irq range */
+ for (index = 0;; index += 3) {
+ ret = of_property_read_u32_index(is->dev->of_node,
+ "interrupt-ranges",
+ index, &range_base);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(is->dev->of_node,
+ "interrupt-ranges",
+ index + 1, &range_end);
+ if (ret)
+ return ret;
+
+ if (irq >= range_base && irq <= range_end)
+ break;
+ }
+
+ /* read the rest of range entry: template index and irq_pos */
+ ret = of_property_read_u32_index(is->dev->of_node,
+ "interrupt-ranges",
+ index + 2, &templ_idx);
+ if (ret)
+ return ret;
+
+ /* read irq template */
+ ret = of_parse_phandle_with_args(is->dev->of_node,
+ "interrupt-templates",
+ "#interrupt-cells",
+ templ_idx, &oirq);
+ if (ret)
+ return ret;
+
+ WARN_ON(!oirq.np);
+ WARN_ON(!oirq.args_count);
+
+ /*
+ * An IRQ template is a non-empty array of u32 values describing a group
+ * of interrupts that share common properties. The u32 entry at index
+ * zero holds the position of the irq id within the interrupt specifier
+ * array; the remaining entries form the interrupt specifier with the
+ * irq id field omitted. To convert an irq template into a full
+ * interrupt specifier, move the first irq_pos entries down one slot and
+ * place the real irq id into the resulting gap.
+ */
+ irq_pos = oirq.args[0];
+
+ if (irq_pos >= oirq.args_count) {
+ dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos);
+ return -EINVAL;
+ }
+
+ for (index = 1; index <= irq_pos; index++)
+ oirq.args[index - 1] = oirq.args[index];
+
+ oirq.args[irq_pos] = irq - range_base;
+
+ ret = irq_create_of_mapping(&oirq);
+
+ return (!ret) ? -EINVAL : ret;
+}
+
+static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq)
+{
+ int ret;
+ int irq;
+ unsigned long irq_flags;
+ struct trusty_irq *trusty_irq;
+
+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+ irq = trusty_irq_create_irq_mapping(is, tirq);
+ if (irq < 0) {
+ dev_err(is->dev,
+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
+ return irq;
+ }
+
+ trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
+ if (!trusty_irq)
+ return -ENOMEM;
+
+ trusty_irq->is = is;
+ trusty_irq->irq = irq;
+ trusty_irq->enable = true;
+
+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+ hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+
+ ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
+ "trusty", trusty_irq);
+ if (ret) {
+ dev_err(is->dev, "request_irq failed %d\n", ret);
+ goto err_request_irq;
+ }
+ return 0;
+
+err_request_irq:
+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+ hlist_del(&trusty_irq->node);
+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+ kfree(trusty_irq);
+ return ret;
+}
+
+static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq)
+{
+ int ret;
+ int irq;
+ unsigned int cpu;
+ struct trusty_irq __percpu *trusty_irq_handler_data;
+
+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+ irq = trusty_irq_create_irq_mapping(is, tirq);
+ if (irq <= 0) {
+ dev_err(is->dev,
+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
+ return irq;
+ }
+
+ trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
+ if (!trusty_irq_handler_data)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct trusty_irq *trusty_irq;
+ struct trusty_irq_irqset *irqset;
+
+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+ irqset = per_cpu_ptr(is->percpu_irqs, cpu);
+
+ trusty_irq->is = is;
+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
+ trusty_irq->irq = irq;
+ trusty_irq->percpu = true;
+ trusty_irq->percpu_ptr = trusty_irq_handler_data;
+ }
+
+ ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
+ trusty_irq_handler_data);
+ if (ret) {
+ dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
+ goto err_request_percpu_irq;
+ }
+
+ return 0;
+
+err_request_percpu_irq:
+ for_each_possible_cpu(cpu) {
+ struct trusty_irq *trusty_irq;
+
+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+ hlist_del(&trusty_irq->node);
+ }
+
+ free_percpu(trusty_irq_handler_data);
+ return ret;
+}
+
+static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
+ unsigned long min_irq, bool per_cpu)
+{
+ return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
+ min_irq, per_cpu, 0);
+}
+
+static int trusty_irq_init_one(struct trusty_irq_state *is,
+ int irq, bool per_cpu)
+{
+ int ret;
+
+ irq = trusty_smc_get_next_irq(is, irq, per_cpu);
+ if (irq < 0)
+ return irq;
+
+ if (per_cpu)
+ ret = trusty_irq_init_per_cpu_irq(is, irq);
+ else
+ ret = trusty_irq_init_normal_irq(is, irq);
+
+ if (ret) {
+ dev_warn(is->dev,
+ "failed to initialize irq %d, irq will be ignored\n",
+ irq);
+ }
+
+ return irq + 1;
+}
+
+static void trusty_irq_free_irqs(struct trusty_irq_state *is)
+{
+ struct trusty_irq *irq;
+ struct hlist_node *n;
+ unsigned int cpu;
+
+ hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) {
+ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq);
+ free_irq(irq->irq, irq);
+ hlist_del(&irq->node);
+ kfree(irq);
+ }
+ hlist_for_each_entry_safe(irq, n,
+ &this_cpu_ptr(is->percpu_irqs)->inactive,
+ node) {
+ struct trusty_irq __percpu *trusty_irq_handler_data;
+
+ dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq);
+ trusty_irq_handler_data = irq->percpu_ptr;
+ free_percpu_irq(irq->irq, trusty_irq_handler_data);
+ for_each_possible_cpu(cpu) {
+ struct trusty_irq *irq_tmp;
+
+ irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu);
+ hlist_del(&irq_tmp->node);
+ }
+ free_percpu(trusty_irq_handler_data);
+ }
+}
+
+static int trusty_irq_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ unsigned long irq_flags;
+ struct trusty_irq_state *is;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ is = kzalloc(sizeof(*is), GFP_KERNEL);
+ if (!is) {
+ ret = -ENOMEM;
+ goto err_alloc_is;
+ }
+
+ is->dev = &pdev->dev;
+ is->trusty_dev = is->dev->parent;
+ spin_lock_init(&is->normal_irqs_lock);
+ is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset);
+ if (!is->percpu_irqs) {
+ ret = -ENOMEM;
+ goto err_alloc_pending_percpu_irqs;
+ }
+
+ platform_set_drvdata(pdev, is);
+
+ is->trusty_call_notifier.notifier_call = trusty_irq_call_notify;
+ ret = trusty_call_notifier_register(is->trusty_dev,
+ &is->trusty_call_notifier);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register trusty call notifier\n");
+ goto err_trusty_call_notifier_register;
+ }
+
+ for (irq = 0; irq >= 0;)
+ irq = trusty_irq_init_one(is, irq, true);
+ for (irq = 0; irq >= 0;)
+ irq = trusty_irq_init_one(is, irq, false);
+
+ ret = cpuhp_state_add_instance(trusty_irq_cpuhp_slot, &is->cpuhp_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n",
+ ret);
+ goto err_add_cpuhp_instance;
+ }
+
+ return 0;
+
+err_add_cpuhp_instance:
+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+ trusty_irq_disable_irqset(is, &is->normal_irqs);
+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+ trusty_irq_free_irqs(is);
+ trusty_call_notifier_unregister(is->trusty_dev,
+ &is->trusty_call_notifier);
+err_trusty_call_notifier_register:
+ free_percpu(is->percpu_irqs);
+err_alloc_pending_percpu_irqs:
+ kfree(is);
+err_alloc_is:
+ return ret;
+}
+
+static int trusty_irq_remove(struct platform_device *pdev)
+{
+ int ret;
+ unsigned long irq_flags;
+ struct trusty_irq_state *is = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ ret = cpuhp_state_remove_instance(trusty_irq_cpuhp_slot,
+ &is->cpuhp_node);
+ if (WARN_ON(ret))
+ return ret;
+
+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+ trusty_irq_disable_irqset(is, &is->normal_irqs);
+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+
+ trusty_irq_free_irqs(is);
+
+ trusty_call_notifier_unregister(is->trusty_dev,
+ &is->trusty_call_notifier);
+ free_percpu(is->percpu_irqs);
+ kfree(is);
+
+ return 0;
+}
+
+static const struct of_device_id trusty_test_of_match[] = {
+ { .compatible = "android,trusty-irq-v1", },
+ {},
+};
+
+static struct platform_driver trusty_irq_driver = {
+ .probe = trusty_irq_probe,
+ .remove = trusty_irq_remove,
+ .driver = {
+ .name = "trusty-irq",
+ .owner = THIS_MODULE,
+ .of_match_table = trusty_test_of_match,
+ },
+};
+
+static int __init trusty_irq_driver_init(void)
+{
+ int ret;
+
+ /* allocate dynamic cpuhp state slot */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "trusty-irq:cpu:online",
+ trusty_irq_cpu_up,
+ trusty_irq_cpu_down);
+ if (ret < 0)
+ return ret;
+ trusty_irq_cpuhp_slot = ret;
+
+ /* Register platform driver */
+ ret = platform_driver_register(&trusty_irq_driver);
+ if (ret < 0)
+ goto err_driver_register;
+
+ return ret;
+
+err_driver_register:
+ /* undo cpuhp slot allocation */
+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
+ trusty_irq_cpuhp_slot = -1;
+
+ return ret;
+}
+
+static void __exit trusty_irq_driver_exit(void)
+{
+ platform_driver_unregister(&trusty_irq_driver);
+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
+ trusty_irq_cpuhp_slot = -1;
+}
+
+module_init(trusty_irq_driver_init);
+module_exit(trusty_irq_driver_exit);
diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c
new file mode 100644
index 000000000000..e8dcced2ff1d
--- /dev/null
+++ b/drivers/trusty/trusty-log.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/log2.h>
+#include <asm/page.h>
+#include "trusty-log.h"
+
+#define TRUSTY_LOG_SIZE (PAGE_SIZE * 2)
+#define TRUSTY_LINE_BUFFER_SIZE 256
+
+struct trusty_log_state {
+ struct device *dev;
+ struct device *trusty_dev;
+
+ /*
+ * This lock is here to ensure only one consumer will read
+ * from the log ring buffer at a time.
+ */
+ spinlock_t lock;
+ struct log_rb *log;
+ uint32_t get;
+
+ struct page *log_pages;
+
+ struct notifier_block call_notifier;
+ struct notifier_block panic_notifier;
+ char line_buffer[TRUSTY_LINE_BUFFER_SIZE];
+};
+
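+/*
+ * Copy one line (up to a terminating '\n' or the end of the available
+ * data) from the shared ring buffer into s->line_buffer and return the
+ * number of characters consumed.
+ */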
+static int log_read_line(struct trusty_log_state *s, int put, int get)
+{
+ struct log_rb *log = s->log;
+ int i;
+ char c = '\0';
+ size_t max_to_read = min((size_t)(put - get),
+ sizeof(s->line_buffer) - 1);
+ size_t mask = log->sz - 1;
+
+ for (i = 0; i < max_to_read && c != '\n';)
+ s->line_buffer[i++] = c = log->data[get++ & mask];
+ s->line_buffer[i] = '\0';
+
+ return i;
+}
+
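+/*
+ * Drain the shared log ring buffer from the consumer's get pointer up to
+ * the producer's put pointer, printing it line by line via pr_info().
+ * Lines that may have been overwritten by the producer are discarded.
+ */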
+static void trusty_dump_logs(struct trusty_log_state *s)
+{
+ struct log_rb *log = s->log;
+ uint32_t get, put, alloc;
+ int read_chars;
+
+ BUG_ON(!is_power_of_2(log->sz));
+
+ /*
+ * For this ring buffer, at any given point, alloc >= put >= get.
+ * The producer side of the buffer is not locked, so the put and alloc
+ * pointers must be read in a defined order (put before alloc) so
+ * that the above condition is maintained. A read barrier is needed
+ * to make sure the hardware and compiler keep the reads ordered.
+ */
+ get = s->get;
+ while ((put = log->put) != get) {
+ /* Make sure that the read of put occurs before the read of log data */
+ rmb();
+
+ /* Read a line from the log */
+ read_chars = log_read_line(s, put, get);
+
+ /* Force the loads from log_read_line to complete. */
+ rmb();
+ alloc = log->alloc;
+
+ /*
+ * Discard the line that was just read if the data could
+ * have been corrupted by the producer.
+ */
+ if (alloc - get > log->sz) {
+ pr_err("trusty: log overflow.");
+ get = alloc - log->sz;
+ continue;
+ }
+ pr_info("trusty: %s", s->line_buffer);
+ get += read_chars;
+ }
+ s->get = get;
+}
+
+static int trusty_log_call_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct trusty_log_state *s;
+ unsigned long flags;
+
+ if (action != TRUSTY_CALL_RETURNED)
+ return NOTIFY_DONE;
+
+ s = container_of(nb, struct trusty_log_state, call_notifier);
+ spin_lock_irqsave(&s->lock, flags);
+ trusty_dump_logs(s);
+ spin_unlock_irqrestore(&s->lock, flags);
+ return NOTIFY_OK;
+}
+
+static int trusty_log_panic_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct trusty_log_state *s;
+
+ /*
+ * Don't grab the spin lock to hold up the panic notifier, even
+ * though this is racy.
+ */
+ s = container_of(nb, struct trusty_log_state, panic_notifier);
+ pr_info("trusty-log panic notifier - trusty version %s",
+ trusty_version_str_get(s->trusty_dev));
+ trusty_dump_logs(s);
+ return NOTIFY_OK;
+}
+
+static bool trusty_supports_logging(struct device *device)
+{
+ int result;
+
+ result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION,
+ TRUSTY_LOG_API_VERSION, 0, 0);
+ if (result == SM_ERR_UNDEFINED_SMC) {
+ pr_info("trusty-log not supported on secure side.\n");
+ return false;
+ } else if (result < 0) {
+ pr_err("trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n",
+ result);
+ return false;
+ }
+
+ if (result == TRUSTY_LOG_API_VERSION) {
+ return true;
+ } else {
+ pr_info("trusty-log unsupported api version: %d, supported: %d\n",
+ result, TRUSTY_LOG_API_VERSION);
+ return false;
+ }
+}
+
+static int trusty_log_probe(struct platform_device *pdev)
+{
+ struct trusty_log_state *s;
+ int result;
+ phys_addr_t pa;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ if (!trusty_supports_logging(pdev->dev.parent)) {
+ return -ENXIO;
+ }
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ result = -ENOMEM;
+ goto error_alloc_state;
+ }
+
+ spin_lock_init(&s->lock);
+ s->dev = &pdev->dev;
+ s->trusty_dev = s->dev->parent;
+ s->get = 0;
+ s->log_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(TRUSTY_LOG_SIZE));
+ if (!s->log_pages) {
+ result = -ENOMEM;
+ goto error_alloc_log;
+ }
+ s->log = page_address(s->log_pages);
+
+ pa = page_to_phys(s->log_pages);
+ result = trusty_std_call32(s->trusty_dev,
+ SMC_SC_SHARED_LOG_ADD,
+ (u32)(pa), (u32)(pa >> 32),
+ TRUSTY_LOG_SIZE);
+ if (result < 0) {
+ pr_err("trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d %pa\n",
+ result, &pa);
+ goto error_std_call;
+ }
+
+ s->call_notifier.notifier_call = trusty_log_call_notify;
+ result = trusty_call_notifier_register(s->trusty_dev,
+ &s->call_notifier);
+ if (result < 0) {
+ dev_err(&pdev->dev,
+ "failed to register trusty call notifier\n");
+ goto error_call_notifier;
+ }
+
+ s->panic_notifier.notifier_call = trusty_log_panic_notify;
+ result = atomic_notifier_chain_register(&panic_notifier_list,
+ &s->panic_notifier);
+ if (result < 0) {
+ dev_err(&pdev->dev,
+ "failed to register panic notifier\n");
+ goto error_panic_notifier;
+ }
+ platform_set_drvdata(pdev, s);
+
+ return 0;
+
+error_panic_notifier:
+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
+error_call_notifier:
+ trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
+ (u32)pa, (u32)(pa >> 32), 0);
+error_std_call:
+ __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE));
+error_alloc_log:
+ kfree(s);
+error_alloc_state:
+ return result;
+}
+
+static int trusty_log_remove(struct platform_device *pdev)
+{
+ int result;
+ struct trusty_log_state *s = platform_get_drvdata(pdev);
+ phys_addr_t pa = page_to_phys(s->log_pages);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &s->panic_notifier);
+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
+
+ result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
+ (u32)pa, (u32)(pa >> 32), 0);
+ if (result) {
+ pr_err("trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n",
+ result);
+ }
+ __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE));
+ kfree(s);
+
+ return 0;
+}
+
+static const struct of_device_id trusty_test_of_match[] = {
+ { .compatible = "android,trusty-log-v1", },
+ {},
+};
+
+static struct platform_driver trusty_log_driver = {
+ .probe = trusty_log_probe,
+ .remove = trusty_log_remove,
+ .driver = {
+ .name = "trusty-log",
+ .owner = THIS_MODULE,
+ .of_match_table = trusty_test_of_match,
+ },
+};
+
+module_platform_driver(trusty_log_driver);
diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h
new file mode 100644
index 000000000000..09f60213e1f6
--- /dev/null
+++ b/drivers/trusty/trusty-log.h
@@ -0,0 +1,22 @@
+#ifndef _TRUSTY_LOG_H_
+#define _TRUSTY_LOG_H_
+
+/*
+ * Ring buffer that supports one secure-side producer thread and one
+ * Linux-side consumer thread.
+ */
+struct log_rb {
+ volatile uint32_t alloc;
+ volatile uint32_t put;
+ uint32_t sz;
+ volatile char data[0];
+} __packed;
+
+#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0)
+#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1)
+#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2)
+
+#define TRUSTY_LOG_API_VERSION 1
+
+#endif
+
diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c
new file mode 100644
index 000000000000..c55ace25beed
--- /dev/null
+++ b/drivers/trusty/trusty-mem.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/smcall.h>
+
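+/*
+ * Return the 8-bit architectural memory attribute for the given page
+ * protection: the MAIR byte selected by the pte attribute index on ARM64
+ * and ARM LPAE, or an equivalent hard-coded value for classic ARM page
+ * tables.
+ */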
+static int get_mem_attr(struct page *page, pgprot_t pgprot)
+{
+#if defined(CONFIG_ARM64)
+ uint64_t mair;
+ uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
+
+ asm ("mrs %0, mair_el1\n" : "=&r" (mair));
+ return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM_LPAE)
+ uint32_t mair;
+ uint attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
+
+ if (attr_index >= 4) {
+ attr_index -= 4;
+ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
+ } else {
+ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
+ }
+ return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM)
+ /* check memory type */
+ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
+ case L_PTE_MT_WRITEALLOC:
+ /* Normal: write back write allocate */
+ return 0xFF;
+
+ case L_PTE_MT_BUFFERABLE:
+ /* Normal: non-cacheable */
+ return 0x44;
+
+ case L_PTE_MT_WRITEBACK:
+ /* Normal: writeback, read allocate */
+ return 0xEE;
+
+ case L_PTE_MT_WRITETHROUGH:
+ /* Normal: write through */
+ return 0xAA;
+
+ case L_PTE_MT_UNCACHED:
+ /* strongly ordered */
+ return 0x00;
+
+ case L_PTE_MT_DEV_SHARED:
+ case L_PTE_MT_DEV_NONSHARED:
+ /* device */
+ return 0x04;
+
+ default:
+ return -EINVAL;
+ }
+#else
+ return 0;
+#endif
+}
+
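+/*
+ * Pack the page's physical address, page-table attributes and the memory
+ * attribute byte (in bits 48-55) into the 64-bit attr field that is
+ * passed to the secure side.
+ */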
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+ struct page *page, pgprot_t pgprot)
+{
+ int mem_attr;
+ uint64_t pte;
+
+ if (!inf || !page)
+ return -EINVAL;
+
+ /* get physical address */
+ pte = (uint64_t) page_to_phys(page);
+
+ /* get memory attributes */
+ mem_attr = get_mem_attr(page, pgprot);
+ if (mem_attr < 0)
+ return mem_attr;
+
+ /* add other attributes */
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
+ pte |= pgprot_val(pgprot);
+#elif defined(CONFIG_ARM)
+ if (pgprot_val(pgprot) & L_PTE_USER)
+ pte |= (1 << 6);
+ if (pgprot_val(pgprot) & L_PTE_RDONLY)
+ pte |= (1 << 7);
+ if (pgprot_val(pgprot) & L_PTE_SHARED)
+ pte |= (3 << 8); /* inner shareable */
+#endif
+
+ inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48);
+ return 0;
+}
+
+int trusty_call32_mem_buf(struct device *dev, u32 smcnr,
+ struct page *page, u32 size,
+ pgprot_t pgprot)
+{
+ int ret;
+ struct ns_mem_page_info pg_inf;
+
+ if (!dev || !page)
+ return -EINVAL;
+
+ ret = trusty_encode_page_info(&pg_inf, page, pgprot);
+ if (ret)
+ return ret;
+
+ if (SMC_IS_FASTCALL(smcnr)) {
+ return trusty_fast_call32(dev, smcnr,
+ (u32)pg_inf.attr,
+ (u32)(pg_inf.attr >> 32), size);
+ } else {
+ return trusty_std_call32(dev, smcnr,
+ (u32)pg_inf.attr,
+ (u32)(pg_inf.attr >> 32), size);
+ }
+}
+
diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c
new file mode 100644
index 000000000000..1eb0ac2cde9b
--- /dev/null
+++ b/drivers/trusty/trusty-virtio.c
@@ -0,0 +1,733 @@
+/*
+ * Trusty Virtio driver
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <linux/remoteproc.h>
+
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+
+#include <linux/atomic.h>
+
+#define RSC_DESCR_VER 1
+
+struct trusty_vdev;
+
+struct trusty_ctx {
+ struct device *dev;
+ void *shared_va;
+ size_t shared_sz;
+ struct work_struct check_vqs;
+ struct work_struct kick_vqs;
+ struct notifier_block call_notifier;
+ struct list_head vdev_list;
+ struct mutex mlock; /* protects vdev_list */
+ struct workqueue_struct *kick_wq;
+ struct workqueue_struct *check_wq;
+};
+
+struct trusty_vring {
+ void *vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ uint align;
+ uint elem_num;
+ u32 notifyid;
+ atomic_t needs_kick;
+ struct fw_rsc_vdev_vring *vr_descr;
+ struct virtqueue *vq;
+ struct trusty_vdev *tvdev;
+ struct trusty_nop kick_nop;
+};
+
+struct trusty_vdev {
+ struct list_head node;
+ struct virtio_device vdev;
+ struct trusty_ctx *tctx;
+ u32 notifyid;
+ uint config_len;
+ void *config;
+ struct fw_rsc_vdev *vdev_descr;
+ uint vring_num;
+ struct trusty_vring vrings[0];
+};
+
+#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev)
+
+static void check_all_vqs(struct work_struct *work)
+{
+ uint i;
+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
+ check_vqs);
+ struct trusty_vdev *tvdev;
+
+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
+ for (i = 0; i < tvdev->vring_num; i++)
+ vring_interrupt(0, tvdev->vrings[i].vq);
+ }
+}
+
+static int trusty_call_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct trusty_ctx *tctx;
+
+ if (action != TRUSTY_CALL_RETURNED)
+ return NOTIFY_DONE;
+
+ tctx = container_of(nb, struct trusty_ctx, call_notifier);
+ queue_work(tctx->check_wq, &tctx->check_vqs);
+
+ return NOTIFY_OK;
+}
+
+static void kick_vq(struct trusty_ctx *tctx,
+ struct trusty_vdev *tvdev,
+ struct trusty_vring *tvr)
+{
+ int ret;
+
+ dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n",
+ __func__, tvdev->notifyid, tvr->notifyid);
+
+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ,
+ tvdev->notifyid, tvr->notifyid, 0);
+ if (ret) {
+ dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n",
+ tvdev->notifyid, tvr->notifyid, ret);
+ }
+}
+
+static void kick_vqs(struct work_struct *work)
+{
+ uint i;
+ struct trusty_vdev *tvdev;
+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
+ kick_vqs);
+ mutex_lock(&tctx->mlock);
+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
+ for (i = 0; i < tvdev->vring_num; i++) {
+ struct trusty_vring *tvr = &tvdev->vrings[i];
+ if (atomic_xchg(&tvr->needs_kick, 0))
+ kick_vq(tctx, tvdev, tvr);
+ }
+ }
+ mutex_unlock(&tctx->mlock);
+}
+
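+/*
+ * Virtqueue kick callback.  On older Trusty API versions the kick is
+ * deferred to the kick_vqs workqueue, which issues a standard call per
+ * ring; on TRUSTY_API_VERSION_SMP_NOP and later a per-ring nop is
+ * enqueued so the kick rides the nop world switch instead.
+ */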
+static bool trusty_virtio_notify(struct virtqueue *vq)
+{
+ struct trusty_vring *tvr = vq->priv;
+ struct trusty_vdev *tvdev = tvr->tvdev;
+ struct trusty_ctx *tctx = tvdev->tctx;
+ u32 api_ver = trusty_get_api_version(tctx->dev->parent);
+
+ if (api_ver < TRUSTY_API_VERSION_SMP_NOP) {
+ atomic_set(&tvr->needs_kick, 1);
+ queue_work(tctx->kick_wq, &tctx->kick_vqs);
+ } else {
+ trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop);
+ }
+
+ return true;
+}
+
+static int trusty_load_device_descr(struct trusty_ctx *tctx,
+ void *va, size_t sz)
+{
+ int ret;
+
+ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va);
+
+ ret = trusty_call32_mem_buf(tctx->dev->parent,
+ SMC_SC_VIRTIO_GET_DESCR,
+ virt_to_page(va), sz, PAGE_KERNEL);
+ if (ret < 0) {
+ dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+ return ret;
+}
+
+static void trusty_virtio_stop(struct trusty_ctx *tctx, void *va, size_t sz)
+{
+ int ret;
+
+ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va);
+
+ ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_STOP,
+ virt_to_page(va), sz, PAGE_KERNEL);
+ if (ret) {
+ dev_err(tctx->dev, "%s: virtio done returned (%d)\n",
+ __func__, ret);
+ return;
+ }
+}
+
+static int trusty_virtio_start(struct trusty_ctx *tctx,
+ void *va, size_t sz)
+{
+ int ret;
+
+ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va);
+
+ ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_START,
+ virt_to_page(va), sz, PAGE_KERNEL);
+ if (ret) {
+ dev_err(tctx->dev, "%s: virtio start returned (%d)\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void trusty_virtio_reset(struct virtio_device *vdev)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ struct trusty_ctx *tctx = tvdev->tctx;
+
+ dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid);
+ trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET,
+ tvdev->notifyid, 0, 0);
+}
+
+static u64 trusty_virtio_get_features(struct virtio_device *vdev)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ return tvdev->vdev_descr->dfeatures;
+}
+
+static int trusty_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
+ tvdev->vdev_descr->gfeatures = vdev->features;
+ return 0;
+}
+
+static void trusty_virtio_get_config(struct virtio_device *vdev,
+ unsigned offset, void *buf,
+ unsigned len)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+
+ dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n",
+ __func__, len, offset);
+
+ if (tvdev->config) {
+ if (offset + len <= tvdev->config_len)
+ memcpy(buf, tvdev->config + offset, len);
+ }
+}
+
+static void trusty_virtio_set_config(struct virtio_device *vdev,
+ unsigned offset, const void *buf,
+ unsigned len)
+{
+ dev_dbg(&vdev->dev, "%s\n", __func__);
+}
+
+static u8 trusty_virtio_get_status(struct virtio_device *vdev)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ return tvdev->vdev_descr->status;
+}
+
+static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ tvdev->vdev_descr->status = status;
+}
+
+static void _del_vqs(struct virtio_device *vdev)
+{
+ uint i;
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ struct trusty_vring *tvr = &tvdev->vrings[0];
+
+ for (i = 0; i < tvdev->vring_num; i++, tvr++) {
+ /* dequeue kick_nop */
+ trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop);
+
+ /* delete vq */
+ if (tvr->vq) {
+ vring_del_virtqueue(tvr->vq);
+ tvr->vq = NULL;
+ }
+ /* delete vring */
+ if (tvr->vaddr) {
+ free_pages_exact(tvr->vaddr, tvr->size);
+ tvr->vaddr = NULL;
+ }
+ }
+}
+
+static void trusty_virtio_del_vqs(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "%s\n", __func__);
+ _del_vqs(vdev);
+}
+
+
+static struct virtqueue *_find_vq(struct virtio_device *vdev,
+ unsigned id,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct trusty_vring *tvr;
+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+ phys_addr_t pa;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ if (id >= tvdev->vring_num)
+ return ERR_PTR(-EINVAL);
+
+ tvr = &tvdev->vrings[id];
+
+ /* actual size of vring (in bytes) */
+ tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align));
+
+ /* allocate memory for the vring. */
+ tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO);
+ if (!tvr->vaddr) {
+ dev_err(&vdev->dev, "vring alloc failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pa = virt_to_phys(tvr->vaddr);
+ tvr->paddr = pa;
+ /* save vring address to shared structure */
+ tvr->vr_descr->da = (u32)pa;
+ /* The da field is only 32 bits wide. Use the previously unused
+ * 'reserved' field to store the top 32 bits of the 64-bit address.
+ */
+ tvr->vr_descr->pa = (u32)(pa >> 32);
+
+ dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n",
+ id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid);
+
+ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align,
+ vdev, true, tvr->vaddr,
+ trusty_virtio_notify, callback, name);
+ if (!tvr->vq) {
+ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n",
+ name);
+ goto err_new_virtqueue;
+ }
+
+ tvr->vq->priv = tvr;
+
+ return tvr->vq;
+
+err_new_virtqueue:
+ free_pages_exact(tvr->vaddr, tvr->size);
+ tvr->vaddr = NULL;
+ return ERR_PTR(-ENOMEM);
+}
+
+static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ uint i;
+ int ret;
+
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = _find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ _del_vqs(vdev);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const char *trusty_virtio_bus_name(struct virtio_device *vdev)
+{
+ return "trusty-virtio";
+}
+
+/* The ops structure which hooks everything together. */
+static const struct virtio_config_ops trusty_virtio_config_ops = {
+ .get_features = trusty_virtio_get_features,
+ .finalize_features = trusty_virtio_finalize_features,
+ .get = trusty_virtio_get_config,
+ .set = trusty_virtio_set_config,
+ .get_status = trusty_virtio_get_status,
+ .set_status = trusty_virtio_set_status,
+ .reset = trusty_virtio_reset,
+ .find_vqs = trusty_virtio_find_vqs,
+ .del_vqs = trusty_virtio_del_vqs,
+ .bus_name = trusty_virtio_bus_name,
+};
+
+static int trusty_virtio_add_device(struct trusty_ctx *tctx,
+ struct fw_rsc_vdev *vdev_descr,
+ struct fw_rsc_vdev_vring *vr_descr,
+ void *config)
+{
+ int i, ret;
+ struct trusty_vdev *tvdev;
+
+ tvdev = kzalloc(sizeof(struct trusty_vdev) +
+ vdev_descr->num_of_vrings * sizeof(struct trusty_vring),
+ GFP_KERNEL);
+ if (!tvdev) {
+ dev_err(tctx->dev, "Failed to allocate VDEV\n");
+ return -ENOMEM;
+ }
+
+ /* setup vdev */
+ tvdev->tctx = tctx;
+ tvdev->vdev.dev.parent = tctx->dev;
+ tvdev->vdev.id.device = vdev_descr->id;
+ tvdev->vdev.config = &trusty_virtio_config_ops;
+ tvdev->vdev_descr = vdev_descr;
+ tvdev->notifyid = vdev_descr->notifyid;
+
+ /* setup config */
+ tvdev->config = config;
+ tvdev->config_len = vdev_descr->config_len;
+
+ /* setup vrings and vdev resource */
+ tvdev->vring_num = vdev_descr->num_of_vrings;
+
+ for (i = 0; i < tvdev->vring_num; i++, vr_descr++) {
+ struct trusty_vring *tvr = &tvdev->vrings[i];
+ tvr->tvdev = tvdev;
+ tvr->vr_descr = vr_descr;
+ tvr->align = vr_descr->align;
+ tvr->elem_num = vr_descr->num;
+ tvr->notifyid = vr_descr->notifyid;
+ trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ,
+ tvdev->notifyid, tvr->notifyid);
+ }
+
+ /* register device */
+ ret = register_virtio_device(&tvdev->vdev);
+ if (ret) {
+ dev_err(tctx->dev,
+ "Failed (%d) to register device dev type %u\n",
+ ret, vdev_descr->id);
+ goto err_register;
+ }
+
+ /* add it to tracking list */
+ list_add_tail(&tvdev->node, &tctx->vdev_list);
+
+ return 0;
+
+err_register:
+ kfree(tvdev);
+ return ret;
+}
+
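+/*
+ * Walk the remoteproc-style resource table returned by Trusty and
+ * register one virtio device per RSC_VDEV entry, after validating that
+ * each header, vdev descriptor, vring array and config area fits within
+ * the shared buffer.
+ */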
+static int trusty_parse_device_descr(struct trusty_ctx *tctx,
+ void *descr_va, size_t descr_sz)
+{
+ u32 i;
+ struct resource_table *descr = descr_va;
+
+ if (descr_sz < sizeof(*descr)) {
+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+ (int)descr_sz);
+ return -ENODEV;
+ }
+
+ if (descr->ver != RSC_DESCR_VER) {
+ dev_err(tctx->dev, "unexpected descr ver (0x%x)\n",
+ (int)descr->ver);
+ return -ENODEV;
+ }
+
+ if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) {
+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+ (int)descr_sz);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < descr->num; i++) {
+ struct fw_rsc_hdr *hdr;
+ struct fw_rsc_vdev *vd;
+ struct fw_rsc_vdev_vring *vr;
+ void *cfg;
+ size_t vd_sz;
+
+ u32 offset = descr->offset[i];
+
+ if (offset >= descr_sz) {
+ dev_err(tctx->dev, "offset is out of bounds (%u)\n",
+ (uint)offset);
+ return -ENODEV;
+ }
+
+ /* check space for rsc header */
+ if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) {
+ dev_err(tctx->dev, "no space for rsc header (%u)\n",
+ (uint)offset);
+ return -ENODEV;
+ }
+ hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset);
+ offset += sizeof(struct fw_rsc_hdr);
+
+ /* check type */
+ if (hdr->type != RSC_VDEV) {
+ dev_err(tctx->dev, "unsupported rsc type (%u)\n",
+ (uint)hdr->type);
+ continue;
+ }
+
+ /* got vdev: check space for vdev */
+ if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) {
+ dev_err(tctx->dev, "no space for vdev descr (%u)\n",
+ (uint)offset);
+ return -ENODEV;
+ }
+ vd = (struct fw_rsc_vdev *)((u8 *)descr + offset);
+
+ /* check space for vrings and config area */
+ vd_sz = sizeof(struct fw_rsc_vdev) +
+ vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) +
+ vd->config_len;
+
+ if ((descr_sz - offset) < vd_sz) {
+ dev_err(tctx->dev, "no space for vdev (%u)\n",
+ (uint)offset);
+ return -ENODEV;
+ }
+ vr = (struct fw_rsc_vdev_vring *)vd->vring;
+ cfg = (void *)(vr + vd->num_of_vrings);
+
+ trusty_virtio_add_device(tctx, vd, vr, cfg);
+ }
+
+ return 0;
+}
+
+static void _remove_devices_locked(struct trusty_ctx *tctx)
+{
+ struct trusty_vdev *tvdev, *next;
+
+ list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) {
+ list_del(&tvdev->node);
+ unregister_virtio_device(&tvdev->vdev);
+ kfree(tvdev);
+ }
+}
+
+static void trusty_virtio_remove_devices(struct trusty_ctx *tctx)
+{
+ mutex_lock(&tctx->mlock);
+ _remove_devices_locked(tctx);
+ mutex_unlock(&tctx->mlock);
+}
+
+static int trusty_virtio_add_devices(struct trusty_ctx *tctx)
+{
+ int ret;
+ void *descr_va;
+ size_t descr_sz;
+ size_t descr_buf_sz;
+
+ /* allocate buffer to load device descriptor into */
+ descr_buf_sz = PAGE_SIZE;
+ descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO);
+ if (!descr_va) {
+ dev_err(tctx->dev, "Failed to allocate shared area\n");
+ return -ENOMEM;
+ }
+
+ /* load device descriptors */
+ ret = trusty_load_device_descr(tctx, descr_va, descr_buf_sz);
+ if (ret < 0) {
+ dev_err(tctx->dev, "failed (%d) to load device descr\n", ret);
+ goto err_load_descr;
+ }
+
+ descr_sz = (size_t)ret;
+
+ mutex_lock(&tctx->mlock);
+
+ /* parse device descriptor and add virtio devices */
+ ret = trusty_parse_device_descr(tctx, descr_va, descr_sz);
+ if (ret) {
+ dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret);
+ goto err_parse_descr;
+ }
+
+ /* register call notifier */
+ ret = trusty_call_notifier_register(tctx->dev->parent,
+ &tctx->call_notifier);
+ if (ret) {
+ dev_err(tctx->dev, "%s: failed (%d) to register notifier\n",
+ __func__, ret);
+ goto err_register_notifier;
+ }
+
+ /* start virtio */
+ ret = trusty_virtio_start(tctx, descr_va, descr_sz);
+ if (ret) {
+ dev_err(tctx->dev, "failed (%d) to start virtio\n", ret);
+ goto err_start_virtio;
+ }
+
+ /* attach shared area */
+ tctx->shared_va = descr_va;
+ tctx->shared_sz = descr_buf_sz;
+
+ mutex_unlock(&tctx->mlock);
+
+ return 0;
+
+err_start_virtio:
+ trusty_call_notifier_unregister(tctx->dev->parent,
+ &tctx->call_notifier);
+ cancel_work_sync(&tctx->check_vqs);
+err_register_notifier:
+err_parse_descr:
+ _remove_devices_locked(tctx);
+ mutex_unlock(&tctx->mlock);
+ cancel_work_sync(&tctx->kick_vqs);
+ trusty_virtio_stop(tctx, descr_va, descr_sz);
+err_load_descr:
+ free_pages_exact(descr_va, descr_buf_sz);
+ return ret;
+}
+
+static int trusty_virtio_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct trusty_ctx *tctx;
+
+ dev_info(&pdev->dev, "initializing\n");
+
+ tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
+ if (!tctx) {
+ dev_err(&pdev->dev, "Failed to allocate context\n");
+ return -ENOMEM;
+ }
+
+ tctx->dev = &pdev->dev;
+ tctx->call_notifier.notifier_call = trusty_call_notify;
+ mutex_init(&tctx->mlock);
+ INIT_LIST_HEAD(&tctx->vdev_list);
+ INIT_WORK(&tctx->check_vqs, check_all_vqs);
+ INIT_WORK(&tctx->kick_vqs, kick_vqs);
+ platform_set_drvdata(pdev, tctx);
+
+ tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0);
+ if (!tctx->check_wq) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed create trusty-check-wq\n");
+ goto err_create_check_wq;
+ }
+
+ tctx->kick_wq = alloc_workqueue("trusty-kick-wq",
+ WQ_UNBOUND | WQ_CPU_INTENSIVE, 0);
+ if (!tctx->kick_wq) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed create trusty-kick-wq\n");
+ goto err_create_kick_wq;
+ }
+
+ ret = trusty_virtio_add_devices(tctx);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add virtio devices\n");
+ goto err_add_devices;
+ }
+
+ dev_info(&pdev->dev, "initializing done\n");
+ return 0;
+
+err_add_devices:
+ destroy_workqueue(tctx->kick_wq);
+err_create_kick_wq:
+ destroy_workqueue(tctx->check_wq);
+err_create_check_wq:
+ kfree(tctx);
+ return ret;
+}
+
+static int trusty_virtio_remove(struct platform_device *pdev)
+{
+ struct trusty_ctx *tctx = platform_get_drvdata(pdev);
+
+ dev_err(&pdev->dev, "removing\n");
+
+ /* unregister call notifier and wait until workqueue is done */
+ trusty_call_notifier_unregister(tctx->dev->parent,
+ &tctx->call_notifier);
+ cancel_work_sync(&tctx->check_vqs);
+
+ /* remove virtio devices */
+ trusty_virtio_remove_devices(tctx);
+ cancel_work_sync(&tctx->kick_vqs);
+
+ /* destroy workqueues */
+ destroy_workqueue(tctx->kick_wq);
+ destroy_workqueue(tctx->check_wq);
+
+ /* notify remote that shared area goes away */
+ trusty_virtio_stop(tctx, tctx->shared_va, tctx->shared_sz);
+
+ /* free shared area */
+ free_pages_exact(tctx->shared_va, tctx->shared_sz);
+
+ /* free context */
+ kfree(tctx);
+ return 0;
+}
+
+static const struct of_device_id trusty_of_match[] = {
+ {
+ .compatible = "android,trusty-virtio-v1",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, trusty_of_match);
+
+static struct platform_driver trusty_virtio_driver = {
+ .probe = trusty_virtio_probe,
+ .remove = trusty_virtio_remove,
+ .driver = {
+ .name = "trusty-virtio",
+ .owner = THIS_MODULE,
+ .of_match_table = trusty_of_match,
+ },
+};
+
+module_platform_driver(trusty_virtio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty virtio driver");
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
new file mode 100644
index 000000000000..0f8ea863e277
--- /dev/null
+++ b/drivers/trusty/trusty.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/compiler.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/trusty/trusty.h>
+
+struct trusty_state;
+
+struct trusty_work {
+ struct trusty_state *ts;
+ struct work_struct work;
+};
+
+struct trusty_state {
+ struct mutex smc_lock;
+ struct atomic_notifier_head notifier;
+ struct completion cpu_idle_completion;
+ char *version_str;
+ u32 api_version;
+ struct device *dev;
+ struct workqueue_struct *nop_wq;
+ struct trusty_work __percpu *nop_works;
+ struct list_head nop_queue;
+ spinlock_t nop_lock; /* protects nop_queue */
+};
+
+#ifdef CONFIG_ARM64
+#define SMC_ARG0 "x0"
+#define SMC_ARG1 "x1"
+#define SMC_ARG2 "x2"
+#define SMC_ARG3 "x3"
+#define SMC_ARCH_EXTENSION ""
+#define SMC_REGISTERS_TRASHED "x4","x5","x6","x7","x8","x9","x10","x11", \
+ "x12","x13","x14","x15","x16","x17"
+#else
+#define SMC_ARG0 "r0"
+#define SMC_ARG1 "r1"
+#define SMC_ARG2 "r2"
+#define SMC_ARG3 "r3"
+#define SMC_ARCH_EXTENSION ".arch_extension sec\n"
+#define SMC_REGISTERS_TRASHED "ip"
+#endif
+
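+/*
+ * Issue a raw SMC.  Arguments and the result travel in r0-r3 (x0-x3 on
+ * arm64); the remaining caller-saved registers are listed as clobbered
+ * because the secure side may not preserve them.
+ */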
+static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3)
+{
+ register ulong _r0 asm(SMC_ARG0) = r0;
+ register ulong _r1 asm(SMC_ARG1) = r1;
+ register ulong _r2 asm(SMC_ARG2) = r2;
+ register ulong _r3 asm(SMC_ARG3) = r3;
+
+ asm volatile(
+ __asmeq("%0", SMC_ARG0)
+ __asmeq("%1", SMC_ARG1)
+ __asmeq("%2", SMC_ARG2)
+ __asmeq("%3", SMC_ARG3)
+ __asmeq("%4", SMC_ARG0)
+ __asmeq("%5", SMC_ARG1)
+ __asmeq("%6", SMC_ARG2)
+ __asmeq("%7", SMC_ARG3)
+ SMC_ARCH_EXTENSION
+ "smc #0" /* switch to secure world */
+ : "=r" (_r0), "=r" (_r1), "=r" (_r2), "=r" (_r3)
+ : "r" (_r0), "r" (_r1), "r" (_r2), "r" (_r3)
+ : SMC_REGISTERS_TRASHED);
+ return _r0;
+}
+
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ BUG_ON(!s);
+ BUG_ON(!SMC_IS_FASTCALL(smcnr));
+ BUG_ON(SMC_IS_SMC64(smcnr));
+
+ return smc(smcnr, a0, a1, a2);
+}
+EXPORT_SYMBOL(trusty_fast_call32);
+
+#ifdef CONFIG_64BIT
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ BUG_ON(!s);
+ BUG_ON(!SMC_IS_FASTCALL(smcnr));
+ BUG_ON(!SMC_IS_SMC64(smcnr));
+
+ return smc(smcnr, a0, a1, a2);
+}
+#endif
+
+static ulong trusty_std_call_inner(struct device *dev, ulong smcnr,
+ ulong a0, ulong a1, ulong a2)
+{
+ ulong ret;
+ int retry = 5;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
+ __func__, smcnr, a0, a1, a2);
+ while (true) {
+ ret = smc(smcnr, a0, a1, a2);
+ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
+ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
+ if ((int)ret != SM_ERR_BUSY || !retry)
+ break;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
+ __func__, smcnr, a0, a1, a2);
+ retry--;
+ }
+
+ return ret;
+}
+
+static ulong trusty_std_call_helper(struct device *dev, ulong smcnr,
+ ulong a0, ulong a1, ulong a2)
+{
+ ulong ret;
+ int sleep_time = 1;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ while (true) {
+ local_irq_disable();
+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
+ NULL);
+ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
+ NULL);
+ local_irq_enable();
+
+ if ((int)ret != SM_ERR_BUSY)
+ break;
+
+ if (sleep_time == 256)
+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
+ __func__, smcnr, a0, a1, a2);
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
+ __func__, smcnr, a0, a1, a2, sleep_time);
+
+ msleep(sleep_time);
+ if (sleep_time < 1000)
+ sleep_time <<= 1;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
+ __func__, smcnr, a0, a1, a2);
+ }
+
+ if (sleep_time > 256)
+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
+ __func__, smcnr, a0, a1, a2);
+
+ return ret;
+}
+
+static void trusty_std_call_cpu_idle(struct trusty_state *s)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
+ if (!ret) {
+ pr_warn("%s: timed out waiting for cpu idle to clear, retry anyway\n",
+ __func__);
+ }
+}
+
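+/*
+ * Issue a 32-bit standard call.  Regular calls are serialized with
+ * smc_lock and restarted with SMC_SC_RESTART_LAST while the secure side
+ * reports SM_ERR_INTERRUPTED or SM_ERR_CPU_IDLE; SMC_SC_NOP calls skip
+ * the lock and instead signal cpu_idle_completion when they finish.
+ */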
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+ int ret;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ BUG_ON(SMC_IS_FASTCALL(smcnr));
+ BUG_ON(SMC_IS_SMC64(smcnr));
+
+ if (smcnr != SMC_SC_NOP) {
+ mutex_lock(&s->smc_lock);
+ reinit_completion(&s->cpu_idle_completion);
+ }
+
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
+ __func__, smcnr, a0, a1, a2);
+
+ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
+ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
+ __func__, smcnr, a0, a1, a2);
+ if (ret == SM_ERR_CPU_IDLE)
+ trusty_std_call_cpu_idle(s);
+ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
+ }
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
+ __func__, smcnr, a0, a1, a2, ret);
+
+ WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed");
+
+ if (smcnr == SMC_SC_NOP)
+ complete(&s->cpu_idle_completion);
+ else
+ mutex_unlock(&s->smc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(trusty_std_call32);
+
+int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return atomic_notifier_chain_register(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_register);
+
+int trusty_call_notifier_unregister(struct device *dev,
+ struct notifier_block *n)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return atomic_notifier_chain_unregister(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_unregister);
+
+static int trusty_remove_child(struct device *dev, void *data)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+ssize_t trusty_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str);
+}
+
+DEVICE_ATTR(trusty_version, S_IRUSR, trusty_version_show, NULL);
+
+const char *trusty_version_str_get(struct device *dev)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return s->version_str;
+}
+EXPORT_SYMBOL(trusty_version_str_get);
+
+static void trusty_init_version(struct trusty_state *s, struct device *dev)
+{
+ int ret;
+ int i;
+ int version_str_len;
+
+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
+ if (ret <= 0)
+ goto err_get_size;
+
+ version_str_len = ret;
+
+ s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
+ if (!s->version_str) {
+ ret = -ENOMEM;
+ goto err_get_size;
+ }
+ for (i = 0; i < version_str_len; i++) {
+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
+ if (ret < 0)
+ goto err_get_char;
+ s->version_str[i] = ret;
+ }
+ s->version_str[i] = '\0';
+
+ dev_info(dev, "trusty version: %s\n", s->version_str);
+
+ ret = device_create_file(dev, &dev_attr_trusty_version);
+ if (ret)
+ goto err_create_file;
+ return;
+
+err_create_file:
+err_get_char:
+ kfree(s->version_str);
+ s->version_str = NULL;
+err_get_size:
+ dev_err(dev, "failed to get version: %d\n", ret);
+}
+
+u32 trusty_get_api_version(struct device *dev)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return s->api_version;
+}
+EXPORT_SYMBOL(trusty_get_api_version);
+
+static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
+{
+ u32 api_version;
+ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
+ TRUSTY_API_VERSION_CURRENT, 0, 0);
+ if (api_version == SM_ERR_UNDEFINED_SMC)
+ api_version = 0;
+
+ if (api_version > TRUSTY_API_VERSION_CURRENT) {
+ dev_err(dev, "unsupported api version %u > %u\n",
+ api_version, TRUSTY_API_VERSION_CURRENT);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "selected api version: %u (requested %u)\n",
+ api_version, TRUSTY_API_VERSION_CURRENT);
+ s->api_version = api_version;
+
+ return 0;
+}
+
+static bool dequeue_nop(struct trusty_state *s, u32 *args)
+{
+ unsigned long flags;
+ struct trusty_nop *nop = NULL;
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (!list_empty(&s->nop_queue)) {
+ nop = list_first_entry(&s->nop_queue,
+ struct trusty_nop, node);
+ list_del_init(&nop->node);
+ args[0] = nop->args[0];
+ args[1] = nop->args[1];
+ args[2] = nop->args[2];
+ } else {
+ args[0] = 0;
+ args[1] = 0;
+ args[2] = 0;
+ }
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+ return nop;
+}
+
+static void locked_nop_work_func(struct work_struct *work)
+{
+ int ret;
+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
+ struct trusty_state *s = tw->ts;
+
+ dev_dbg(s->dev, "%s\n", __func__);
+
+ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
+ if (ret != 0)
+ dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d",
+ __func__, ret);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
+static void nop_work_func(struct work_struct *work)
+{
+ int ret;
+ bool next;
+ u32 args[3];
+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
+ struct trusty_state *s = tw->ts;
+
+ dev_dbg(s->dev, "%s:\n", __func__);
+
+ dequeue_nop(s, args);
+ do {
+ dev_dbg(s->dev, "%s: %x %x %x\n",
+ __func__, args[0], args[1], args[2]);
+
+ ret = trusty_std_call32(s->dev, SMC_SC_NOP,
+ args[0], args[1], args[2]);
+
+ next = dequeue_nop(s, args);
+
+ if (ret == SM_ERR_NOP_INTERRUPTED)
+ next = true;
+ else if (ret != SM_ERR_NOP_DONE)
+ dev_err(s->dev, "%s: SMC_SC_NOP failed %d",
+ __func__, ret);
+ } while (next);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
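+/*
+ * Queue a nop world switch on the current cpu's work item.  If a nop
+ * descriptor is supplied (SMP_NOP API and later) it is appended to the
+ * shared nop_queue so nop_work_func() can pass its arguments to
+ * SMC_SC_NOP.
+ */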
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
+{
+ unsigned long flags;
+ struct trusty_work *tw;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ preempt_disable();
+ tw = this_cpu_ptr(s->nop_works);
+ if (nop) {
+ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (list_empty(&nop->node))
+ list_add_tail(&nop->node, &s->nop_queue);
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+ }
+ queue_work(s->nop_wq, &tw->work);
+ preempt_enable();
+}
+EXPORT_SYMBOL(trusty_enqueue_nop);
+
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
+{
+ unsigned long flags;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ if (WARN_ON(!nop))
+ return;
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (!list_empty(&nop->node))
+ list_del_init(&nop->node);
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+}
+EXPORT_SYMBOL(trusty_dequeue_nop);
+
+static int trusty_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int cpu;
+ work_func_t work_func;
+ struct trusty_state *s;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!node) {
+ dev_err(&pdev->dev, "of_node required\n");
+ return -EINVAL;
+ }
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto err_allocate_state;
+ }
+
+ s->dev = &pdev->dev;
+ spin_lock_init(&s->nop_lock);
+ INIT_LIST_HEAD(&s->nop_queue);
+ mutex_init(&s->smc_lock);
+ ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
+ init_completion(&s->cpu_idle_completion);
+ platform_set_drvdata(pdev, s);
+
+ trusty_init_version(s, &pdev->dev);
+
+ ret = trusty_init_api_version(s, &pdev->dev);
+ if (ret < 0)
+ goto err_api_version;
+
+ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0);
+ if (!s->nop_wq) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed create trusty-nop-wq\n");
+ goto err_create_nop_wq;
+ }
+
+ s->nop_works = alloc_percpu(struct trusty_work);
+ if (!s->nop_works) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate works\n");
+ goto err_alloc_works;
+ }
+
+ if (s->api_version < TRUSTY_API_VERSION_SMP)
+ work_func = locked_nop_work_func;
+ else
+ work_func = nop_work_func;
+
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ tw->ts = s;
+ INIT_WORK(&tw->work, work_func);
+ }
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
+ goto err_add_children;
+ }
+
+ return 0;
+
+err_add_children:
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ flush_work(&tw->work);
+ }
+ free_percpu(s->nop_works);
+err_alloc_works:
+ destroy_workqueue(s->nop_wq);
+err_create_nop_wq:
+err_api_version:
+ if (s->version_str) {
+ device_remove_file(&pdev->dev, &dev_attr_trusty_version);
+ kfree(s->version_str);
+ }
+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+ mutex_destroy(&s->smc_lock);
+ kfree(s);
+err_allocate_state:
+ return ret;
+}
+
+static int trusty_remove(struct platform_device *pdev)
+{
+ unsigned int cpu;
+ struct trusty_state *s = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ flush_work(&tw->work);
+ }
+ free_percpu(s->nop_works);
+ destroy_workqueue(s->nop_wq);
+
+ mutex_destroy(&s->smc_lock);
+ if (s->version_str) {
+ device_remove_file(&pdev->dev, &dev_attr_trusty_version);
+ kfree(s->version_str);
+ }
+ kfree(s);
+ return 0;
+}
+
+static const struct of_device_id trusty_of_match[] = {
+ { .compatible = "android,trusty-smc-v1", },
+ {},
+};
+
+static struct platform_driver trusty_driver = {
+ .probe = trusty_probe,
+ .remove = trusty_remove,
+ .driver = {
+ .name = "trusty",
+ .owner = THIS_MODULE,
+ .of_match_table = trusty_of_match,
+ },
+};
+
+static int __init trusty_driver_init(void)
+{
+ return platform_driver_register(&trusty_driver);
+}
+
+static void __exit trusty_driver_exit(void)
+{
+ platform_driver_unregister(&trusty_driver);
+}
+
+subsys_initcall(trusty_driver_init);
+module_exit(trusty_driver_exit);
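Purely as an illustration of how the helpers exported above are meant to be consumed (this sketch is not part of the patch; every example_ name is invented and the header path is an assumption), a child driver created by of_platform_populate() in trusty_probe() could query its parent trusty device like this:

#include <linux/platform_device.h>
#include <linux/trusty/trusty.h>	/* assumed location of the trusty_* prototypes */

/*
 * Children of the "android,trusty-smc-v1" node are created by
 * of_platform_populate() in trusty_probe(), so pdev->dev.parent is the
 * trusty device and can be handed to the exported trusty_* helpers.
 */
static int example_trusty_child_probe(struct platform_device *pdev)
{
	struct device *trusty_dev = pdev->dev.parent;

	dev_info(&pdev->dev, "trusty version %s, api %u\n",
		 trusty_version_str_get(trusty_dev),
		 trusty_get_api_version(trusty_dev));
	return 0;
}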
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 5817e2397463..b95bed92da9f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_R3964) += n_r3964.o
obj-y += vt/
obj-$(CONFIG_HVC_DRIVER) += hvc/
obj-y += serial/
+obj-$(CONFIG_SERIAL_DEV_BUS) += serdev/
# tty drivers
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
diff --git a/drivers/tty/serdev/Kconfig b/drivers/tty/serdev/Kconfig
new file mode 100644
index 000000000000..cdc6b820cf93
--- /dev/null
+++ b/drivers/tty/serdev/Kconfig
@@ -0,0 +1,16 @@
+#
+# Serial bus device driver configuration
+#
+menuconfig SERIAL_DEV_BUS
+ tristate "Serial device bus"
+ help
+ Core support for devices connected via a serial port.
+
+if SERIAL_DEV_BUS
+
+config SERIAL_DEV_CTRL_TTYPORT
+ bool "Serial device TTY port controller"
+ depends on TTY
+ depends on SERIAL_DEV_BUS != m
+
+endif
diff --git a/drivers/tty/serdev/Makefile b/drivers/tty/serdev/Makefile
new file mode 100644
index 000000000000..0cbdb9444d9d
--- /dev/null
+++ b/drivers/tty/serdev/Makefile
@@ -0,0 +1,5 @@
+serdev-objs := core.o
+
+obj-$(CONFIG_SERIAL_DEV_BUS) += serdev.o
+
+obj-$(CONFIG_SERIAL_DEV_CTRL_TTYPORT) += serdev-ttyport.o
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
new file mode 100644
index 000000000000..5e3f612f30b7
--- /dev/null
+++ b/drivers/tty/serdev/core.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * Based on drivers/spmi/spmi.c:
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+static bool is_registered;
+static DEFINE_IDA(ctrl_ida);
+
+static void serdev_device_release(struct device *dev)
+{
+ struct serdev_device *serdev = to_serdev_device(dev);
+ kfree(serdev);
+}
+
+static const struct device_type serdev_device_type = {
+ .release = serdev_device_release,
+};
+
+static void serdev_ctrl_release(struct device *dev)
+{
+ struct serdev_controller *ctrl = to_serdev_controller(dev);
+ ida_simple_remove(&ctrl_ida, ctrl->nr);
+ kfree(ctrl);
+}
+
+static const struct device_type serdev_ctrl_type = {
+ .release = serdev_ctrl_release,
+};
+
+static int serdev_device_match(struct device *dev, struct device_driver *drv)
+{
+ /* TODO: ACPI and platform matching */
+ return of_driver_match_device(dev, drv);
+}
+
+static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ /* TODO: ACPI and platform modalias */
+ return of_device_uevent_modalias(dev, env);
+}
+
+/**
+ * serdev_device_add() - add a device previously constructed via serdev_device_alloc()
+ * @serdev: serdev_device to be added
+ */
+int serdev_device_add(struct serdev_device *serdev)
+{
+ struct device *parent = serdev->dev.parent;
+ int err;
+
+ dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
+
+ err = device_add(&serdev->dev);
+ if (err < 0) {
+ dev_err(&serdev->dev, "Can't add %s, status %d\n",
+ dev_name(&serdev->dev), err);
+ goto err_device_add;
+ }
+
+ dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
+
+err_device_add:
+ return err;
+}
+EXPORT_SYMBOL_GPL(serdev_device_add);
+
+/**
+ * serdev_device_remove() - remove a serdev device
+ * @serdev: serdev_device to be removed
+ */
+void serdev_device_remove(struct serdev_device *serdev)
+{
+ device_unregister(&serdev->dev);
+}
+EXPORT_SYMBOL_GPL(serdev_device_remove);
+
+int serdev_device_open(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->open)
+ return -EINVAL;
+
+ return ctrl->ops->open(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_open);
+
+void serdev_device_close(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->close)
+ return;
+
+ ctrl->ops->close(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_close);
+
+void serdev_device_write_wakeup(struct serdev_device *serdev)
+{
+ complete(&serdev->write_comp);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
+
+int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_buf)
+ return -EINVAL;
+
+ return ctrl->ops->write_buf(ctrl, buf, count);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+
+int serdev_device_write(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count,
+ unsigned long timeout)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+ int ret;
+
+ if (!ctrl || !ctrl->ops->write_buf ||
+ (timeout && !serdev->ops->write_wakeup))
+ return -EINVAL;
+
+ mutex_lock(&serdev->write_lock);
+ do {
+ reinit_completion(&serdev->write_comp);
+
+ ret = ctrl->ops->write_buf(ctrl, buf, count);
+ if (ret < 0)
+ break;
+
+ buf += ret;
+ count -= ret;
+
+ } while (count &&
+ (timeout = wait_for_completion_timeout(&serdev->write_comp,
+ timeout)));
+ mutex_unlock(&serdev->write_lock);
+ return ret < 0 ? ret : (count ? -ETIMEDOUT : 0);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write);
+
+void serdev_device_write_flush(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_flush)
+ return;
+
+ ctrl->ops->write_flush(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_flush);
+
+int serdev_device_write_room(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_room)
+ return 0;
+
+ return ctrl->ops->write_room(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_room);
+
+unsigned int serdev_device_set_baudrate(struct serdev_device *serdev, unsigned int speed)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_baudrate)
+ return 0;
+
+ return ctrl->ops->set_baudrate(ctrl, speed);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_baudrate);
+
+void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_flow_control)
+ return;
+
+ ctrl->ops->set_flow_control(ctrl, enable);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
+
+void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->wait_until_sent)
+ return;
+
+ ctrl->ops->wait_until_sent(ctrl, timeout);
+}
+EXPORT_SYMBOL_GPL(serdev_device_wait_until_sent);
+
+int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->get_tiocm)
+ return -ENOTSUPP;
+
+ return ctrl->ops->get_tiocm(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_get_tiocm);
+
+int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_tiocm)
+ return -ENOTSUPP;
+
+ return ctrl->ops->set_tiocm(ctrl, set, clear);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_tiocm);
+
+static int serdev_drv_probe(struct device *dev)
+{
+ const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
+
+ return sdrv->probe(to_serdev_device(dev));
+}
+
+static int serdev_drv_remove(struct device *dev)
+{
+ const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
+
+ sdrv->remove(to_serdev_device(dev));
+ return 0;
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
+ buf[len] = '\n';
+ buf[len+1] = 0;
+ return len+1;
+}
+
+static struct device_attribute serdev_device_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL
+};
+
+static struct bus_type serdev_bus_type = {
+ .name = "serial",
+ .match = serdev_device_match,
+ .probe = serdev_drv_probe,
+ .remove = serdev_drv_remove,
+ .uevent = serdev_uevent,
+ .dev_attrs = serdev_device_attrs,
+};
+
+/**
+ * serdev_device_alloc() - Allocate a new serdev device
+ * @ctrl: associated controller
+ *
+ * Caller is responsible for either calling serdev_device_add() to add the
+ * newly allocated device, or calling serdev_device_put() to discard it.
+ */
+struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
+{
+ struct serdev_device *serdev;
+
+ serdev = kzalloc(sizeof(*serdev), GFP_KERNEL);
+ if (!serdev)
+ return NULL;
+
+ serdev->ctrl = ctrl;
+ ctrl->serdev = serdev;
+ device_initialize(&serdev->dev);
+ serdev->dev.parent = &ctrl->dev;
+ serdev->dev.bus = &serdev_bus_type;
+ serdev->dev.type = &serdev_device_type;
+ init_completion(&serdev->write_comp);
+ mutex_init(&serdev->write_lock);
+ return serdev;
+}
+EXPORT_SYMBOL_GPL(serdev_device_alloc);
+
+/**
+ * serdev_controller_alloc() - Allocate a new serdev controller
+ * @parent: parent device
+ * @size: size of private data
+ *
+ * Caller is responsible for either calling serdev_controller_add() to add the
+ * newly allocated controller, or calling serdev_controller_put() to discard it.
+ * The allocated private data region may be accessed via
+ * serdev_controller_get_drvdata()
+ */
+struct serdev_controller *serdev_controller_alloc(struct device *parent,
+ size_t size)
+{
+ struct serdev_controller *ctrl;
+ int id;
+
+ if (WARN_ON(!parent))
+ return NULL;
+
+ ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &serdev_ctrl_type;
+ ctrl->dev.bus = &serdev_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ serdev_controller_set_drvdata(ctrl, &ctrl[1]);
+
+ id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(parent,
+ "unable to allocate serdev controller identifier.\n");
+ serdev_controller_put(ctrl);
+ return NULL;
+ }
+
+ ctrl->nr = id;
+ dev_set_name(&ctrl->dev, "serial%d", id);
+
+ dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(serdev_controller_alloc);
+
+static int of_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ struct device_node *node;
+ struct serdev_device *serdev = NULL;
+ int err;
+ bool found = false;
+
+ for_each_available_child_of_node(ctrl->dev.of_node, node) {
+ if (!of_get_property(node, "compatible", NULL))
+ continue;
+
+ dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
+
+ serdev = serdev_device_alloc(ctrl);
+ if (!serdev)
+ continue;
+
+ serdev->dev.of_node = node;
+
+ err = serdev_device_add(serdev);
+ if (err) {
+ dev_err(&serdev->dev,
+ "failure adding device. status %d\n", err);
+ serdev_device_put(serdev);
+ } else
+ found = true;
+ }
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * serdev_controller_add() - Add a serdev controller
+ * @ctrl: controller to be registered.
+ *
+ * Register a controller previously allocated via serdev_controller_alloc() with
+ * the serdev core.
+ */
+int serdev_controller_add(struct serdev_controller *ctrl)
+{
+ int ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ return -EAGAIN;
+
+ ret = device_add(&ctrl->dev);
+ if (ret)
+ return ret;
+
+ ret = of_serdev_register_devices(ctrl);
+ if (ret)
+ goto out_dev_del;
+
+ dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
+ ctrl->nr, &ctrl->dev);
+ return 0;
+
+out_dev_del:
+ device_del(&ctrl->dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(serdev_controller_add);
+
+/* Remove a device associated with a controller */
+static int serdev_remove_device(struct device *dev, void *data)
+{
+ struct serdev_device *serdev = to_serdev_device(dev);
+ if (dev->type == &serdev_device_type)
+ serdev_device_remove(serdev);
+ return 0;
+}
+
+/**
+ * serdev_controller_remove() - remove a serdev controller
+ * @ctrl: controller to remove
+ *
+ * Remove a serdev controller. Caller is responsible for calling
+ * serdev_controller_put() to discard the allocated controller.
+ */
+void serdev_controller_remove(struct serdev_controller *ctrl)
+{
+ int dummy;
+
+ if (!ctrl)
+ return;
+
+ dummy = device_for_each_child(&ctrl->dev, NULL,
+ serdev_remove_device);
+ device_del(&ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(serdev_controller_remove);
+
+/**
+ * __serdev_device_driver_register() - Register a client driver with the serdev core
+ * @sdrv: client driver to be registered
+ * @owner: module that owns the client driver
+ *
+ * Registers the client driver with the serdev framework. It is typically
+ * called from the driver's module-init function.
+ */
+int __serdev_device_driver_register(struct serdev_device_driver *sdrv, struct module *owner)
+{
+ sdrv->driver.bus = &serdev_bus_type;
+ sdrv->driver.owner = owner;
+
+ /* force drivers to async probe so I/O is possible in probe */
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
+
+static void __exit serdev_exit(void)
+{
+ bus_unregister(&serdev_bus_type);
+}
+module_exit(serdev_exit);
+
+static int __init serdev_init(void)
+{
+ int ret;
+
+ ret = bus_register(&serdev_bus_type);
+ if (ret)
+ return ret;
+
+ is_registered = true;
+ return 0;
+}
+/* Must be before serial drivers register */
+postcore_initcall(serdev_init);
+
+MODULE_AUTHOR("Rob Herring <robh@kernel.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Serial attached device bus");
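For a sense of the consumer side of this new bus (again only a hedged sketch, not part of the patch; every example_ name and the compatible string are invented), a minimal serdev client driver built strictly on the calls defined above could look like:

/* Hypothetical serdev client bound to a DT child node of the UART. */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/serdev.h>

static int example_serdev_probe(struct serdev_device *serdev)
{
	static const unsigned char cmd[] = "AT\r";
	int ret;

	ret = serdev_device_open(serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);

	/* Fire-and-forget write; serdev_device_write() would also wait. */
	ret = serdev_device_write_buf(serdev, cmd, sizeof(cmd) - 1);
	return ret < 0 ? ret : 0;
}

static void example_serdev_remove(struct serdev_device *serdev)
{
	serdev_device_close(serdev);
}

static const struct of_device_id example_serdev_of_match[] = {
	{ .compatible = "vendor,example-serdev" },	/* made-up compatible */
	{ }
};

static struct serdev_device_driver example_serdev_driver = {
	.probe = example_serdev_probe,
	.remove = example_serdev_remove,
	.driver = {
		.name = "example-serdev",
		.of_match_table = example_serdev_of_match,
	},
};

static int __init example_serdev_init(void)
{
	/* Normally hidden behind a registration macro in <linux/serdev.h>. */
	return __serdev_device_driver_register(&example_serdev_driver,
					       THIS_MODULE);
}
module_init(example_serdev_init);

static void __exit example_serdev_exit(void)
{
	driver_unregister(&example_serdev_driver.driver);
}
module_exit(example_serdev_exit);

MODULE_LICENSE("GPL v2");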
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
new file mode 100644
index 000000000000..013efffd2e82
--- /dev/null
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/serdev.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/poll.h>
+
+#define SERPORT_ACTIVE 1
+
+struct serport {
+ struct tty_port *port;
+ struct tty_struct *tty;
+ struct tty_driver *tty_drv;
+ int tty_idx;
+ unsigned long flags;
+};
+
+/*
+ * Callback functions from the tty port.
+ */
+
+static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
+ const unsigned char *fp, size_t count)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
+ return 0;
+
+ return serdev_controller_receive_buf(ctrl, cp, count);
+}
+
+static void ttyport_write_wakeup(struct tty_port *port)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+
+ if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
+ test_bit(SERPORT_ACTIVE, &serport->flags))
+ serdev_controller_write_wakeup(ctrl);
+
+ wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
+}
+
+static const struct tty_port_client_operations client_ops = {
+ .receive_buf = ttyport_receive_buf,
+ .write_wakeup = ttyport_write_wakeup,
+};
+
+/*
+ * Callback functions from the serdev core.
+ */
+
+static int ttyport_write_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t len)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
+ return 0;
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return tty->ops->write(serport->tty, data, len);
+}
+
+static void ttyport_write_flush(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ tty_driver_flush_buffer(tty);
+}
+
+static int ttyport_write_room(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ return tty_write_room(tty);
+}
+
+static int ttyport_open(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty;
+ struct ktermios ktermios;
+
+ tty = tty_init_dev(serport->tty_drv, serport->tty_idx);
+ if (IS_ERR(tty))
+ return PTR_ERR(tty);
+ serport->tty = tty;
+
+ if (tty->ops->open)
+ tty->ops->open(serport->tty, NULL);
+ else
+ tty_port_open(serport->port, tty, NULL);
+
+ /* Bring the UART into a known state: 8 data bits, no parity, HW flow control */
+ ktermios = tty->termios;
+ ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
+ INLCR | IGNCR | ICRNL | IXON);
+ ktermios.c_oflag &= ~OPOST;
+ ktermios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ ktermios.c_cflag &= ~(CSIZE | PARENB);
+ ktermios.c_cflag |= CS8;
+ ktermios.c_cflag |= CRTSCTS;
+ tty_set_termios(tty, &ktermios);
+
+ set_bit(SERPORT_ACTIVE, &serport->flags);
+
+ tty_unlock(serport->tty);
+ return 0;
+}
+
+static void ttyport_close(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ clear_bit(SERPORT_ACTIVE, &serport->flags);
+
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+
+ tty_release_struct(tty, serport->tty_idx);
+}
+
+static unsigned int ttyport_set_baudrate(struct serdev_controller *ctrl, unsigned int speed)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ ktermios.c_cflag &= ~CBAUD;
+ tty_termios_encode_baud_rate(&ktermios, speed, speed);
+
+ /* The return value of tty_set_termios() is not checked as it is always 0 */
+ tty_set_termios(tty, &ktermios);
+ return speed;
+}
+
+static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ if (enable)
+ ktermios.c_cflag |= CRTSCTS;
+ else
+ ktermios.c_cflag &= ~CRTSCTS;
+
+ tty_set_termios(tty, &ktermios);
+}
+
+static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ tty_wait_until_sent(tty, timeout);
+}
+
+static int ttyport_get_tiocm(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!tty->ops->tiocmget)
+ return -ENOTSUPP;
+
+ return tty->driver->ops->tiocmget(tty);
+}
+
+static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, unsigned int clear)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!tty->ops->tiocmset)
+ return -ENOTSUPP;
+
+ return tty->driver->ops->tiocmset(tty, set, clear);
+}
+
+static const struct serdev_controller_ops ctrl_ops = {
+ .write_buf = ttyport_write_buf,
+ .write_flush = ttyport_write_flush,
+ .write_room = ttyport_write_room,
+ .open = ttyport_open,
+ .close = ttyport_close,
+ .set_flow_control = ttyport_set_flow_control,
+ .set_baudrate = ttyport_set_baudrate,
+ .wait_until_sent = ttyport_wait_until_sent,
+ .get_tiocm = ttyport_get_tiocm,
+ .set_tiocm = ttyport_set_tiocm,
+};
+
+struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+{
+ const struct tty_port_client_operations *old_ops;
+ struct serdev_controller *ctrl;
+ struct serport *serport;
+ int ret;
+
+ if (!port || !drv || !parent)
+ return ERR_PTR(-ENODEV);
+
+ ctrl = serdev_controller_alloc(parent, sizeof(struct serport));
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ serport = serdev_controller_get_drvdata(ctrl);
+
+ serport->port = port;
+ serport->tty_idx = idx;
+ serport->tty_drv = drv;
+
+ ctrl->ops = &ctrl_ops;
+
+ old_ops = port->client_ops;
+ port->client_ops = &client_ops;
+ port->client_data = ctrl;
+
+ ret = serdev_controller_add(ctrl);
+ if (ret)
+ goto err_reset_data;
+
+ dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
+ return &ctrl->dev;
+
+err_reset_data:
+ port->client_data = NULL;
+ port->client_ops = old_ops;
+ serdev_controller_put(ctrl);
+
+ return ERR_PTR(ret);
+}
+
+void serdev_tty_port_unregister(struct tty_port *port)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+
+ if (!serport)
+ return;
+
+ serdev_controller_remove(ctrl);
+ port->client_ops = NULL;
+ port->client_data = NULL;
+ serdev_controller_put(ctrl);
+}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2e2b88aa3004..8a3c0ef66ebd 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2081,7 +2081,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
- if (device_may_wakeup(tty_dev)) {
+ if (tty_dev && device_may_wakeup(tty_dev)) {
if (!enable_irq_wake(uport->irq))
uport->irq_wake = 1;
put_device(tty_dev);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index e99f1c5b1df6..677fa99b7747 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -448,7 +448,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
*
* Returns the number of bytes processed
*/
-int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count)
{
if (ld->ops->receive_buf2)
@@ -463,7 +463,7 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
static int
-receive_buf(struct tty_ldisc *ld, struct tty_buffer *head, int count)
+receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
unsigned char *p = char_buf_ptr(head, head->read);
char *f = NULL;
@@ -471,7 +471,7 @@ receive_buf(struct tty_ldisc *ld, struct tty_buffer *head, int count)
if (~head->flags & TTYB_NORMAL)
f = flag_buf_ptr(head, head->read);
- return tty_ldisc_receive_buf(ld, p, f, count);
+ return port->client_ops->receive_buf(port, p, f, count);
}
/**
@@ -491,16 +491,6 @@ static void flush_to_ldisc(struct work_struct *work)
{
struct tty_port *port = container_of(work, struct tty_port, buf.work);
struct tty_bufhead *buf = &port->buf;
- struct tty_struct *tty;
- struct tty_ldisc *disc;
-
- tty = READ_ONCE(port->itty);
- if (tty == NULL)
- return;
-
- disc = tty_ldisc_ref(tty);
- if (disc == NULL)
- return;
mutex_lock(&buf->lock);
@@ -530,7 +520,7 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(disc, head, count);
+ count = receive_buf(port, head, count);
if (!count)
break;
head->read += count;
@@ -538,7 +528,6 @@ static void flush_to_ldisc(struct work_struct *work)
mutex_unlock(&buf->lock);
- tty_ldisc_deref(disc);
}
/**
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 734a635e7363..a1fd3f7d487a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -855,7 +855,7 @@ static void tty_vhangup_session(struct tty_struct *tty)
int tty_hung_up_p(struct file *filp)
{
- return (filp->f_op == &hung_up_tty_fops);
+ return (filp && filp->f_op == &hung_up_tty_fops);
}
EXPORT_SYMBOL(tty_hung_up_p);
@@ -1745,6 +1745,37 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
}
/**
+ * tty_release_struct - release a tty struct
+ * @tty: tty device
+ * @idx: index of the tty
+ *
+ * Performs the final steps to release and free a tty device. It is
+ * roughly the reverse of tty_init_dev.
+ */
+void tty_release_struct(struct tty_struct *tty, int idx)
+{
+ /*
+ * Ask the line discipline code to release its structures
+ */
+ tty_ldisc_release(tty);
+
+ /* Wait for pending work before tty destruction commences */
+ tty_flush_works(tty);
+
+ tty_debug_hangup(tty, "freeing structure\n");
+ /*
+ * The release_tty function takes care of the details of clearing
+ * the slots and preserving the termios structure. The tty_unlock_pair
+ * should be safe as we keep a kref while the tty is locked (so the
+ * unlock never unlocks a freed tty).
+ */
+ mutex_lock(&tty_mutex);
+ release_tty(tty, idx);
+ mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_release_struct);
+
+/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
@@ -1898,25 +1929,8 @@ int tty_release(struct inode *inode, struct file *filp)
return 0;
tty_debug_hangup(tty, "final close\n");
- /*
- * Ask the line discipline code to release its structures
- */
- tty_ldisc_release(tty);
-
- /* Wait for pending work before tty destruction commmences */
- tty_flush_works(tty);
-
- tty_debug_hangup(tty, "freeing structure\n");
- /*
- * The release_tty function takes care of the details of clearing
- * the slots and preserving the termios structure. The tty_unlock_pair
- * should be safe as we keep a kref while the tty is locked (so the
- * unlock never unlocks a freed tty).
- */
- mutex_lock(&tty_mutex);
- release_tty(tty, idx);
- mutex_unlock(&tty_mutex);
+ tty_release_struct(tty, idx);
return 0;
}
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index c3f9d93ba227..5cd3cd932293 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -16,6 +16,45 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/serdev.h>
+
+static int tty_port_default_receive_buf(struct tty_port *port,
+ const unsigned char *p,
+ const unsigned char *f, size_t count)
+{
+ int ret;
+ struct tty_struct *tty;
+ struct tty_ldisc *disc;
+
+ tty = READ_ONCE(port->itty);
+ if (!tty)
+ return 0;
+
+ disc = tty_ldisc_ref(tty);
+ if (!disc)
+ return 0;
+
+ ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
+
+ tty_ldisc_deref(disc);
+
+ return ret;
+}
+
+static void tty_port_default_wakeup(struct tty_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(port);
+
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+}
+
+static const struct tty_port_client_operations default_client_ops = {
+ .receive_buf = tty_port_default_receive_buf,
+ .write_wakeup = tty_port_default_wakeup,
+};
void tty_port_init(struct tty_port *port)
{
@@ -28,6 +67,7 @@ void tty_port_init(struct tty_port *port)
spin_lock_init(&port->lock);
port->close_delay = (50 * HZ) / 100;
port->closing_wait = (3000 * HZ) / 100;
+ port->client_ops = &default_client_ops;
kref_init(&port->kref);
}
EXPORT_SYMBOL(tty_port_init);
@@ -67,8 +107,7 @@ struct device *tty_port_register_device(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device)
{
- tty_port_link_device(port, driver, index);
- return tty_register_device(driver, index, device);
+ return tty_port_register_device_attr(port, driver, index, device, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tty_port_register_device);
@@ -90,7 +129,15 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
struct device *device, void *drvdata,
const struct attribute_group **attr_grp)
{
+ struct device *dev;
+
tty_port_link_device(port, driver, index);
+
+ dev = serdev_tty_port_register(port, device, driver, index);
+ if (PTR_ERR(dev) != -ENODEV)
+ /* Skip creating cdev if we registered a serdev device */
+ return dev;
+
return tty_register_device_attr(driver, index, device, drvdata,
attr_grp);
}
@@ -142,6 +189,9 @@ static void tty_port_destructor(struct kref *kref)
/* check if last port ref was dropped before tty release */
if (WARN_ON(port->itty))
return;
+
+ serdev_tty_port_unregister(port);
+
if (port->xmit_buf)
free_page((unsigned long)port->xmit_buf);
tty_port_destroy(port);
@@ -273,12 +323,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
*/
void tty_port_tty_wakeup(struct tty_port *port)
{
- struct tty_struct *tty = tty_port_tty_get(port);
-
- if (tty) {
- tty_wakeup(tty);
- tty_kref_put(tty);
- }
+ port->client_ops->write_wakeup(port);
}
EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
@@ -335,7 +380,7 @@ EXPORT_SYMBOL(tty_port_lower_dtr_rts);
* tty_port_block_til_ready - Waiting logic for tty open
* @port: the tty port being opened
* @tty: the tty device being bound
- * @filp: the file pointer of the opener
+ * @filp: the file pointer of the opener or NULL
*
* Implement the core POSIX/SuS tty behaviour when opening a tty device.
* Handles:
@@ -369,7 +414,7 @@ int tty_port_block_til_ready(struct tty_port *port,
tty_port_set_active(port, 1);
return 0;
}
- if (filp->f_flags & O_NONBLOCK) {
+ if (filp == NULL || (filp->f_flags & O_NONBLOCK)) {
/* Indicate we are open */
if (C_BAUD(tty))
tty_port_raise_dtr_rts(port);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978cbd3e..001c8079024a 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -105,6 +105,8 @@ source "drivers/usb/chipidea/Kconfig"
source "drivers/usb/isp1760/Kconfig"
+source "drivers/usb/pd/Kconfig"
+
comment "USB port drivers"
if USB
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca78565eb55..0e407debfcc8 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_USB_ISP1760) += isp1760/
obj-$(CONFIG_USB_MON) += mon/
-obj-$(CONFIG_PCI) += host/
+obj-$(CONFIG_USB_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
@@ -61,3 +61,6 @@ obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += common/
obj-$(CONFIG_USBIP_CORE) += usbip/
+
+obj-$(CONFIG_TYPEC) += typec/
+obj-$(CONFIG_TCPC_CLASS) += pd/
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f91723a..a6cd44a711cf 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -63,7 +63,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
int i, size;
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!hcd->self.controller->dma_mask &&
+ (!is_device_dma_capable(hcd->self.sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM)))
return 0;
@@ -72,7 +72,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
if (!size)
continue;
snprintf(name, sizeof(name), "buffer-%d", size);
- hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
+ hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
size, size, 0);
if (!hcd->pool[i]) {
hcd_buffer_destroy(hcd);
@@ -127,7 +127,7 @@ void *hcd_buffer_alloc(
/* some USB hosts just use PIO */
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!bus->controller->dma_mask &&
+ (!is_device_dma_capable(bus->sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM))) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
@@ -137,7 +137,7 @@ void *hcd_buffer_alloc(
if (size <= pool_max[i])
return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
}
- return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
+ return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
}
void hcd_buffer_free(
@@ -154,7 +154,7 @@ void hcd_buffer_free(
return;
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!bus->controller->dma_mask &&
+ (!is_device_dma_capable(bus->sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM))) {
kfree(addr);
return;
@@ -166,5 +166,5 @@ void hcd_buffer_free(
return;
}
}
- dma_free_coherent(hcd->self.controller, size, addr, dma);
+ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fcc7aa248ce7..ff3e925f3aae 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1076,6 +1076,7 @@ static void usb_deregister_bus (struct usb_bus *bus)
static int register_root_hub(struct usb_hcd *hcd)
{
struct device *parent_dev = hcd->self.controller;
+ struct device *sysdev = hcd->self.sysdev;
struct usb_device *usb_dev = hcd->self.root_hub;
const int devnum = 1;
int retval;
@@ -1122,7 +1123,7 @@ static int register_root_hub(struct usb_hcd *hcd)
/* Did the HC die before the root hub was registered? */
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
- usb_dev->dev.of_node = parent_dev->of_node;
+ usb_dev->dev.of_node = sysdev->of_node;
}
mutex_unlock(&usb_bus_idr_lock);
@@ -1468,19 +1469,19 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SG))
- dma_unmap_sg(hcd->self.controller,
+ dma_unmap_sg(hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_PAGE))
- dma_unmap_page(hcd->self.controller,
+ dma_unmap_page(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SINGLE))
- dma_unmap_single(hcd->self.controller,
+ dma_unmap_single(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
@@ -1523,11 +1524,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
return ret;
if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->setup_dma))
return -EAGAIN;
urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
@@ -1558,7 +1559,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
}
n = dma_map_sg(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
@@ -1573,12 +1574,12 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
- hcd->self.controller,
+ hcd->self.sysdev,
sg_page(sg),
sg->offset,
urb->transfer_buffer_length,
dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
@@ -1588,11 +1589,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
} else {
urb->transfer_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
@@ -2500,24 +2501,8 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
}
-/**
- * usb_create_shared_hcd - create and initialize an HCD structure
- * @driver: HC driver that will use this hcd
- * @dev: device for this HC, stored in hcd->self.controller
- * @bus_name: value to store in hcd->self.bus_name
- * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
- * PCI device. Only allocate certain resources for the primary HCD
- * Context: !in_interrupt()
- *
- * Allocate a struct usb_hcd, with extra space at the end for the
- * HC driver's private data. Initialize the generic members of the
- * hcd structure.
- *
- * Return: On success, a pointer to the created and initialized HCD structure.
- * On failure (e.g. if memory is unavailable), %NULL.
- */
-struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
- struct device *dev, const char *bus_name,
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
struct usb_hcd *primary_hcd)
{
struct usb_hcd *hcd;
@@ -2559,8 +2544,9 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
usb_bus_init(&hcd->self);
hcd->self.controller = dev;
+ hcd->self.sysdev = sysdev;
hcd->self.bus_name = bus_name;
- hcd->self.uses_dma = (dev->dma_mask != NULL);
+ hcd->self.uses_dma = (sysdev->dma_mask != NULL);
init_timer(&hcd->rh_timer);
hcd->rh_timer.function = rh_timer_func;
@@ -2575,6 +2561,30 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
"USB Host Controller";
return hcd;
}
+EXPORT_SYMBOL_GPL(__usb_create_hcd);
+
+/**
+ * usb_create_shared_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
+ * PCI device. Only allocate certain resources for the primary HCD
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data. Initialize the generic members of the
+ * hcd structure.
+ *
+ * Return: On success, a pointer to the created and initialized HCD structure.
+ * On failure (e.g. if memory is unavailable), %NULL.
+ */
+struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd)
+{
+ return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
+}
EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
/**
@@ -2594,7 +2604,7 @@ EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name)
{
- return usb_create_shared_hcd(driver, dev, bus_name, NULL);
+ return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
}
EXPORT_SYMBOL_GPL(usb_create_hcd);
@@ -2721,7 +2731,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
struct usb_device *rhdev;
if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->usb_phy) {
- struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
+ struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0);
if (IS_ERR(phy)) {
retval = PTR_ERR(phy);
@@ -2739,7 +2749,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) {
- struct phy *phy = phy_get(hcd->self.controller, "usb");
+ struct phy *phy = phy_get(hcd->self.sysdev, "usb");
if (IS_ERR(phy)) {
retval = PTR_ERR(phy);
@@ -2787,7 +2797,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
*/
retval = hcd_buffer_create(hcd);
if (retval != 0) {
- dev_dbg(hcd->self.controller, "pool alloc failed\n");
+ dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
goto err_create_buf;
}
@@ -2797,7 +2807,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
if (rhdev == NULL) {
- dev_err(hcd->self.controller, "unable to allocate root hub\n");
+ dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
retval = -ENOMEM;
goto err_allocate_root_hub;
}
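The point of splitting hcd->self.sysdev from hcd->self.controller is easier to see from the caller's side. As a hedged sketch (not part of the patch; the example_ names are invented and the hc_driver callbacks are omitted), a platform glue driver whose DMA-capable device is its parent could now do:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static const struct hc_driver example_hc_driver = {
	.description = "example-hcd",
	.product_desc = "Example Host Controller",
	/* .reset/.start/.urb_enqueue/... omitted for brevity */
};

static int example_glue_probe(struct platform_device *pdev)
{
	/* DMA mappings and OF/firmware lookups follow sysdev, not the hcd dev */
	struct device *sysdev = pdev->dev.parent ?: &pdev->dev;
	struct usb_hcd *hcd;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	hcd = __usb_create_hcd(&example_hc_driver, sysdev, &pdev->dev,
			       dev_name(&pdev->dev), NULL);
	if (!hcd)
		return -ENOMEM;

	return usb_add_hcd(hcd, irq, IRQF_SHARED);
}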
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index eaf1c3b06f02..e1ab778e774a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -452,9 +452,9 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
* Note: calling dma_set_mask() on a USB device would set the
* mask for the entire HCD, so don't do that.
*/
- dev->dev.dma_mask = bus->controller->dma_mask;
- dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
- set_dev_node(&dev->dev, dev_to_node(bus->controller));
+ dev->dev.dma_mask = bus->sysdev->dma_mask;
+ dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
+ set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
atomic_set(&dev->urbnum, 0);
@@ -802,7 +802,7 @@ struct urb *usb_buffer_map(struct urb *urb)
if (!urb
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return NULL;
if (controller->dma_mask) {
@@ -840,7 +840,7 @@ void usb_buffer_dmasync(struct urb *urb)
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return;
if (controller->dma_mask) {
@@ -874,7 +874,7 @@ void usb_buffer_unmap(struct urb *urb)
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return;
if (controller->dma_mask) {
@@ -924,7 +924,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return -EINVAL;
@@ -960,7 +960,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return;
@@ -988,7 +988,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index f6759c61ad07..d75bd5926e0b 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -313,7 +313,7 @@ static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
* Do core a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
*/
-int dwc2_core_reset(struct dwc2_hsotg *hsotg)
+int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
{
u32 greset;
int count = 0;
@@ -369,7 +369,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg)
}
} while (!(greset & GRSTCTL_AHBIDLE));
- if (wait_for_host_mode)
+ if (wait_for_host_mode && !skip_wait)
dwc2_wait_for_mode(hsotg, true);
return 0;
@@ -500,7 +500,7 @@ int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
{
int retval;
- retval = dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg, false);
if (retval)
return retval;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a0414b1d..bc723c5eb2af 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -417,6 +417,11 @@ enum dwc2_ep0_state {
* needed.
* 0 - No (default)
* 1 - Yes
+ * @change_speed_quirk: Change the speed configuration to DWC2_SPEED_PARAM_FULL
+ * while a full- or low-speed device is connected, and change it
+ * back to DWC2_SPEED_PARAM_HIGH once the device is gone.
+ * 0 - No (default)
+ * 1 - Yes
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -457,6 +462,7 @@ struct dwc2_core_params {
int uframe_sched;
int external_id_pin_ctl;
int hibernation;
+ int change_speed_quirk;
};
/**
@@ -624,6 +630,19 @@ struct dwc2_hregs_backup {
};
/*
+ * struct dwc2_extcon - Holds data related to extcon connections
+ *
+ * @nb: Notifier block for callbacks
+ * @extcon: Pointer to extcon device
+ * @state: State of extcon connection
+ */
+struct dwc2_extcon {
+ struct notifier_block nb;
+ struct extcon_dev *extcon;
+ int state;
+};
+
+/*
* Constants related to high speed periodic scheduling
*
* We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a
@@ -996,6 +1015,10 @@ struct dwc2_hsotg {
u32 g_np_g_tx_fifo_sz;
u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
+
+ struct dwc2_extcon extcon_vbus;
+ struct dwc2_extcon extcon_id;
+ struct delayed_work extcon_work;
};
/* Reasons for halting a host channel */
@@ -1020,7 +1043,7 @@ enum dwc2_halt_status {
* The following functions support initialization of the core driver component
* and the DWC_otg controller
*/
-extern int dwc2_core_reset(struct dwc2_hsotg *hsotg);
+extern int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait);
extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
@@ -1041,6 +1064,11 @@ extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg);
extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd);
extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
+extern int dwc2_extcon_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr);
+extern int dwc2_extcon_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr);
+
/* This function should be called on every hardware interrupt. */
extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
@@ -1298,6 +1326,7 @@ extern int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg);
extern int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2);
extern int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2);
extern int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq);
+extern void dwc2_gadget_notify(struct dwc2_hsotg *hsotg);
extern void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
bool reset);
extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
@@ -1315,6 +1344,7 @@ static inline int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2)
{ return 0; }
static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{ return 0; }
+static inline void dwc2_gadget_notify(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
bool reset) {}
static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index d85c5c9f96c1..0120bc75441b 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -479,6 +479,28 @@ skip_power_saving:
}
}
+int dwc2_extcon_vbus_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct dwc2_extcon *vbus = container_of(nb, struct dwc2_extcon, nb);
+ struct dwc2_hsotg *hsotg = container_of(vbus, struct dwc2_hsotg,
+ extcon_vbus);
+
+ schedule_delayed_work(&hsotg->extcon_work, msecs_to_jiffies(100));
+ return NOTIFY_DONE;
+}
+
+int dwc2_extcon_id_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct dwc2_extcon *id = container_of(nb, struct dwc2_extcon, nb);
+ struct dwc2_hsotg *hsotg = container_of(id, struct dwc2_hsotg,
+ extcon_id);
+
+ schedule_delayed_work(&hsotg->extcon_work, msecs_to_jiffies(100));
+ return NOTIFY_DONE;
+}
+
#define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \
GINTSTS_CONIDSTSCHNG | GINTSTS_OTGINT | \
GINTSTS_MODEMIS | GINTSTS_DISCONNINT | \
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index cfdd5c3da236..dadeb7d8f907 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -72,6 +72,14 @@ static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_notify(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->extcon_id.state)
+ usb_gadget_vbus_connect(&hsotg->gadget);
+ else
+ usb_gadget_vbus_disconnect(&hsotg->gadget);
+}
+
/**
* using_dma - return the DMA status of the driver.
* @hsotg: The driver state.
@@ -2467,6 +2475,8 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
call_gadget(hsotg, disconnect);
hsotg->lx_state = DWC2_L3;
+
+ usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
}
/**
@@ -2521,7 +2531,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
if (!is_usb_reset)
- if (dwc2_core_reset(hsotg))
+ if (dwc2_core_reset(hsotg, true))
return;
/*
@@ -3115,6 +3125,11 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
+ dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
+ return -EINVAL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578005..586715e9c5a0 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -47,6 +47,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/extcon.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
@@ -54,6 +55,8 @@
#include "core.h"
#include "hcd.h"
+static void dwc2_port_resume(struct dwc2_hsotg *hsotg);
+
/*
* =========================================================================
* Host Core Layer Functions
@@ -3199,17 +3202,25 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
!!(gotgctl & GOTGCTL_CONID_B));
-
+again:
/* B-Device connector (Device Mode) */
if (gotgctl & GOTGCTL_CONID_B) {
/* Wait for switch to device mode */
dev_dbg(hsotg->dev, "connId B\n");
+ if (hsotg->bus_suspended) {
+ dev_info(hsotg->dev,
+ "Do port resume before switching to device mode\n");
+ dwc2_port_resume(hsotg);
+ }
while (!dwc2_is_device_mode(hsotg)) {
dev_info(hsotg->dev,
"Waiting for Peripheral Mode, Mode=%s\n",
dwc2_is_host_mode(hsotg) ? "Host" :
"Peripheral");
usleep_range(20000, 40000);
+ gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ if (!(gotgctl & GOTGCTL_CONID_B))
+ goto again;
if (++count > 250)
break;
}
@@ -3237,8 +3248,13 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
- hsotg->op_state = OTG_STATE_A_HOST;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
@@ -3246,6 +3262,23 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
}
}
+static void dwc2_hcd_extcon_func(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ extcon_work.work);
+
+ if (!IS_ERR(hsotg->extcon_vbus.extcon))
+ hsotg->extcon_vbus.state =
+ extcon_get_state(hsotg->extcon_vbus.extcon,
+ EXTCON_USB);
+ if (!IS_ERR(hsotg->extcon_id.extcon))
+ hsotg->extcon_id.state =
+ extcon_get_state(hsotg->extcon_id.extcon,
+ EXTCON_USB_HOST);
+
+ dwc2_gadget_notify(hsotg);
+}
+
static void dwc2_wakeup_detected(unsigned long data)
{
struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
@@ -4365,6 +4398,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
if (!hsotg->core_params->hibernation)
goto skip_power_saving;
@@ -4860,6 +4896,61 @@ static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
spin_unlock_irqrestore(&hsotg->lock, flags);
}
+/*
+ * HPRT0_SPD_HIGH_SPEED: high speed
+ * HPRT0_SPD_FULL_SPEED: full speed
+ */
+static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (hsotg->core_params->speed == speed)
+ return;
+
+ hsotg->core_params->speed = speed;
+ queue_work(hsotg->wq_otg, &hsotg->wf_otg);
+}
+
+static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (!hsotg->core_params->change_speed_quirk)
+ return;
+
+ /*
+ * On removal, set speed to default high-speed.
+ */
+ if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
+ udev->parent->speed < USB_SPEED_HIGH) {
+ dev_info(hsotg->dev, "Set speed to default high-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
+ }
+}
+
+static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (!hsotg->core_params->change_speed_quirk)
+ return 0;
+
+ if (udev->speed == USB_SPEED_HIGH) {
+ dev_info(hsotg->dev, "Set speed to high-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
+ } else if ((udev->speed == USB_SPEED_FULL ||
+ udev->speed == USB_SPEED_LOW)) {
+ /*
+ * Change speed setting to full-speed if there's
+ * a full-speed or low-speed device plugged in.
+ */
+ dev_info(hsotg->dev, "Set speed to full-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
+ }
+
+ return 0;
+}
+
static struct hc_driver dwc2_hc_driver = {
.description = "dwc2_hsotg",
.product_desc = "DWC OTG Controller",
@@ -5015,6 +5106,11 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
}
+ if (hsotg->core_params->change_speed_quirk) {
+ dwc2_hc_driver.free_dev = dwc2_free_dev;
+ dwc2_hc_driver.reset_device = dwc2_reset_device;
+ }
+
hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
if (!hcd)
goto error1;
@@ -5085,6 +5181,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
/* Initialize port reset work */
INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
+ INIT_DELAYED_WORK(&hsotg->extcon_work, dwc2_hcd_extcon_func);
+
/*
* Allocate space for storing data on status transactions. Normally no
* data is sent, but this space acts as a bit bucket. This must be
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 8e1728b39a49..62a4bb259d83 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -46,6 +46,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
#include <linux/reset.h>
+#include <linux/extcon.h>
#include <linux/usb/of.h>
@@ -85,6 +86,7 @@ static const struct dwc2_core_params params_hi6220 = {
.uframe_sched = 0,
.external_id_pin_ctl = -1,
.hibernation = -1,
+ .change_speed_quirk = 1,
};
static const struct dwc2_core_params params_bcm2835 = {
@@ -543,6 +545,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
+ struct extcon_dev *ext_id, *ext_vbus;
int retval;
match = of_match_device(dwc2_of_match_table, &dev->dev);
@@ -620,6 +623,51 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
+ ext_id = ERR_PTR(-ENODEV);
+ ext_vbus = ERR_PTR(-ENODEV);
+ /* Neither of the extcon phandles is mandatory */
+ ext_vbus = extcon_get_edev_by_phandle(&dev->dev, 0);
+ if (IS_ERR(ext_vbus)) {
+ if (PTR_ERR(ext_vbus) != -ENODEV)
+ return PTR_ERR(ext_vbus);
+ ext_vbus = NULL;
+ }
+
+ ext_id = extcon_get_edev_by_phandle(&dev->dev, 1);
+ if (IS_ERR(ext_id)) {
+ if (PTR_ERR(ext_id) != -ENODEV)
+ return PTR_ERR(ext_id);
+ ext_id = NULL;
+ }
+
+ hsotg->extcon_vbus.extcon = ext_vbus;
+ if (ext_vbus) {
+ hsotg->extcon_vbus.nb.notifier_call = dwc2_extcon_vbus_notifier;
+ retval = devm_extcon_register_notifier(&dev->dev, ext_vbus,
+ EXTCON_USB,
+ &hsotg->extcon_vbus.nb);
+ if (retval < 0) {
+ dev_err(&dev->dev, "register VBUS notifier failed\n");
+ return retval;
+ }
+ hsotg->extcon_vbus.state = extcon_get_state(ext_vbus,
+ EXTCON_USB);
+ }
+
+ hsotg->extcon_id.extcon = ext_id;
+ if (ext_id) {
+ hsotg->extcon_id.nb.notifier_call = dwc2_extcon_id_notifier;
+ retval = devm_extcon_register_notifier(&dev->dev, ext_id,
+ EXTCON_USB_HOST,
+ &hsotg->extcon_id.nb);
+ if (retval < 0) {
+ dev_err(&dev->dev, "register ID notifier failed\n");
+ return retval;
+ }
+ hsotg->extcon_id.state = extcon_get_state(ext_id,
+ EXTCON_USB_HOST);
+ }
+
/*
* Reset before dwc2_get_hwparams() then it could get power-on real
reset value from registers.
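
The probe hunk above registers dwc2_extcon_vbus_notifier and dwc2_extcon_id_notifier, but their bodies are outside this excerpt. A minimal sketch of the shape such a callback is assumed to take (hypothetical body; only the names and the extcon_vbus/extcon_work members come from the diff):

static int dwc2_extcon_vbus_notifier(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	/*
	 * Hypothetical sketch -- the real callback lives elsewhere in this
	 * series. Needs <linux/extcon.h>, <linux/notifier.h> and
	 * <linux/workqueue.h>.
	 */
	struct dwc2_hsotg *hsotg = container_of(nb, struct dwc2_hsotg,
						extcon_vbus.nb);

	/* Cache the new VBUS state and let the delayed work re-evaluate it. */
	hsotg->extcon_vbus.state = event;
	schedule_delayed_work(&hsotg->extcon_work, msecs_to_jiffies(20));

	return NOTIFY_DONE;
}

The delayed work (dwc2_hcd_extcon_func() in the hcd.c hunk above) then re-reads both extcon states and calls dwc2_gadget_notify(), which is likewise defined outside this excerpt.
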
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b97cde76914d..b695745ff974 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -41,6 +41,7 @@ config USB_DWC3_GADGET
config USB_DWC3_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
+ depends on (EXTCON=y || EXTCON=USB_DWC3)
help
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
@@ -62,7 +63,7 @@ config USB_DWC3_OMAP
config USB_DWC3_EXYNOS
tristate "Samsung Exynos Platform"
- depends on ARCH_EXYNOS && OF || COMPILE_TEST
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
default USB_DWC3
help
Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
@@ -70,7 +71,7 @@ config USB_DWC3_EXYNOS
config USB_DWC3_PCI
tristate "PCIe-based Platforms"
- depends on PCI
+ depends on PCI && ACPI
default USB_DWC3
help
If you're using the DesignWare Core IP with a PCIe, please say
@@ -98,11 +99,26 @@ config USB_DWC3_OF_SIMPLE
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
default USB_DWC3
help
STMicroelectronics SoCs with one DesignWare Core USB3 IP
inside (i.e. STiH407).
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_HISI
+ tristate "Hisilicon Platforms"
+ select USB_DWC3_OTG
+ default USB_DWC3
+ help
+ Support for USB2/3 functionality on HiSilicon platforms.
+ Say 'Y' or 'M' here if you have one such device.
+ Selecting this option also selects USB_DWC3_OTG.
+
+config USB_DWC3_OTG
+ bool "Enable DWC3 OTG"
+ default n
+ help
+ Say 'Y' here to enable the DWC3 OTG feature.
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 22420e17d68b..2a3c32a1841e 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,9 +1,15 @@
# define_trace.h needs to know how to find our header
CFLAGS_trace.o := -I$(src)
+ccflags-$(CONFIG_USB_DWC3_HISI) += -DDWC3_ENABLE_CSP
+ccflags-$(CONFIG_USB_DWC3_OTG) += -DDWC3_OTG_FORCE_MODE
obj-$(CONFIG_USB_DWC3) += dwc3.o
-dwc3-y := core.o debug.o trace.o
+dwc3-y := core.o
+
+ifneq ($(CONFIG_FTRACE),)
+ dwc3-y += trace.o
+endif
ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
dwc3-y += host.o
@@ -13,6 +19,10 @@ ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
dwc3-y += gadget.o ep0.o
endif
+ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
+ dwc3-y += drd.o
+endif
+
ifneq ($(CONFIG_USB_DWC3_ULPI),)
dwc3-y += ulpi.o
endif
@@ -21,6 +31,8 @@ ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
endif
+dwc3-$(CONFIG_USB_DWC3_OTG) += dwc3-otg.o
+
##
# Platform-specific glue layers go here
#
@@ -39,3 +51,4 @@ obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
+obj-$(CONFIG_USB_DWC3_HISI) += dwc3-hisi.o dwc3-hi3660.o
\ No newline at end of file
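
core.c below includes dwc3-otg.h and, in the OTG branch, calls into the new dwc3-otg.o object; neither file appears in this excerpt. The interface is assumed to be roughly the following (hypothetical prototypes; only the function names are taken from the call sites in core.c):

/* Hypothetical prototypes -- dwc3-otg.h itself is not shown in this excerpt. */
struct dwc3;

int dwc3_otg_init(struct dwc3 *dwc);
void dwc3_otg_exit(struct dwc3 *dwc);

In the core.c hunks below, dwc3_otg_init() is called after PRTCAP is set to OTG, and dwc3_otg_exit() is used to unwind when host or gadget initialization fails.
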
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea446900cad..e5cb920d1957 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -44,7 +44,7 @@
#include "core.h"
#include "gadget.h"
#include "io.h"
-
+#include "dwc3-otg.h"
#include "debug.h"
#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
@@ -87,6 +87,8 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
mode = USB_DR_MODE_PERIPHERAL;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE))
+ mode = USB_DR_MODE_OTG;
}
if (mode != dwc->dr_mode) {
@@ -100,7 +102,10 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
return 0;
}
-void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+static void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+static int dwc3_event_buffers_setup(struct dwc3 *dwc);
+
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
{
u32 reg;
@@ -110,6 +115,71 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
+#ifndef CONFIG_USB_DWC3_HISI
+static void __dwc3_set_mode(struct work_struct *work)
+{
+ struct dwc3 *dwc = work_to_dwc(work);
+ unsigned long flags;
+ int ret;
+
+ if (!dwc->desired_dr_role)
+ return;
+
+ if (dwc->desired_dr_role == dwc->current_dr_role)
+ return;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_gadget_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc3_set_prtcap(dwc, dwc->desired_dr_role);
+
+ dwc->current_dr_role = dwc->desired_dr_role;
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (dwc->desired_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ ret = dwc3_host_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_event_buffers_setup(dwc);
+ ret = dwc3_gadget_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize peripheral\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_dr_role = mode;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ queue_work(system_power_efficient_wq, &dwc->drd_work);
+}
+
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
struct dwc3 *dwc = dep->dwc;
@@ -169,33 +239,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
return -ETIMEDOUT;
}
-/**
- * dwc3_soft_reset - Issue soft reset
- * @dwc: Pointer to our controller context structure
- */
-static int dwc3_soft_reset(struct dwc3 *dwc)
-{
- unsigned long timeout;
- u32 reg;
-
- timeout = jiffies + msecs_to_jiffies(500);
- dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
- do {
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (!(reg & DWC3_DCTL_CSFTRST))
- break;
-
- if (time_after(jiffies, timeout)) {
- dev_err(dwc->dev, "Reset Timed Out\n");
- return -ETIMEDOUT;
- }
-
- cpu_relax();
- } while (true);
-
- return 0;
-}
-
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc3: Pointer to our controller context structure
@@ -229,7 +272,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
struct dwc3_event_buffer *evt)
{
- dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+ dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}
/**
@@ -251,7 +294,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
evt->dwc = dwc;
evt->length = length;
- evt->buf = dma_alloc_coherent(dwc->dev, length,
+ evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
+ if (!evt->cache)
+ return ERR_PTR(-ENOMEM);
+
+ evt->buf = dma_alloc_coherent(dwc->sysdev, length,
&evt->dma, GFP_KERNEL);
if (!evt->buf)
return ERR_PTR(-ENOMEM);
@@ -305,12 +352,12 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
evt = dwc->ev_buf;
- dwc3_trace(trace_dwc3_core,
- "Event buf %p dma %08llx length %d\n",
- evt->buf, (unsigned long long) evt->dma,
- evt->length);
-
evt->lpos = 0;
+ #ifdef CONFIG_USB_DWC3_HISI
+ evt->count = 0;
+ evt->flags = 0;
+ memset(evt->buf, 0, evt->length);
+ #endif
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
lower_32_bits(evt->dma));
@@ -370,11 +417,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+ scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->dev, scratch_addr)) {
- dev_err(dwc->dev, "failed to map scratch buffer\n");
+ if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+ dev_err(dwc->sysdev, "failed to map scratch buffer\n");
ret = -EFAULT;
goto err0;
}
@@ -398,7 +445,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
err1:
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
err0:
@@ -417,7 +464,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return;
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
kfree(dwc->scratchbuf);
}
@@ -426,11 +473,7 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
{
struct dwc3_hwparams *parms = &dwc->hwparams;
- dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
- dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
-
- dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints",
- dwc->num_in_eps, dwc->num_out_eps);
+ dwc->num_eps = DWC3_NUM_EPS(parms);
}
static void dwc3_cache_hwparams(struct dwc3 *dwc)
@@ -464,6 +507,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
+ * Make sure UX_EXIT_PX is cleared as that causes issues with some
+ * PHYs. Also, this bit is not supposed to be used in normal operation.
+ */
+ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+
+ /*
* Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
* to '0' during coreConsultant configuration. So default value
* will be '0' when the core is reset. Application needs to set it
@@ -524,13 +573,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/* FALLTHROUGH */
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- /* Making sure the interface and PHY are operational */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- return ret;
-
- udelay(1);
-
ret = dwc3_ulpi_init(dwc);
if (ret)
return ret;
@@ -594,19 +636,12 @@ static void dwc3_core_exit(struct dwc3 *dwc)
phy_power_off(dwc->usb3_generic_phy);
}
-/**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
- *
- * Returns 0 on success otherwise negative errno.
- */
-static int dwc3_core_init(struct dwc3 *dwc)
+static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
- u32 hwparams4 = dwc->hwparams.hwparams4;
- u32 reg;
- int ret;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+
/* This should read as U3 followed by revision number */
if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
/* Detected DWC_usb3 IP */
@@ -616,36 +651,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
dwc->revision |= DWC3_REVISION_IS_DWC31;
} else {
- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
- ret = -ENODEV;
- goto err0;
+ return false;
}
- /*
- * Write Linux Version Code to our GUID register so it's easy to figure
- * out which kernel version a bug was found.
- */
- dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
-
- /* Handle USB2.0-only core configuration */
- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
- }
-
- /* issue device SoftReset too */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- goto err0;
-
- ret = dwc3_core_soft_reset(dwc);
- if (ret)
- goto err0;
+ return true;
+}
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
+static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+{
+ u32 hwparams4 = dwc->hwparams.hwparams4;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -683,13 +698,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg |= DWC3_GCTL_GBLHIBERNATIONEN;
break;
default:
- dwc3_trace(trace_dwc3_core, "No power optimization available\n");
+ /* nothing */
+ break;
}
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dwc3_trace(trace_dwc3_core,
- "running on FPGA platform\n");
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
dwc->is_fpga = true;
}
@@ -712,9 +727,55 @@ static int dwc3_core_init(struct dwc3 *dwc)
*/
if (dwc->revision < DWC3_REVISION_190A)
reg |= DWC3_GCTL_U2RSTECN;
-
+ #ifdef DWC3_OTG_FORCE_MODE
+ /*
+ * If the ID status is detected by an external module, default to device mode.
+ */
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
+ #endif
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_core_init(struct dwc3 *dwc)
+{
+ u32 reg;
+ int ret;
+
+ if (!dwc3_core_is_valid(dwc)) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ /*
+ * Write Linux Version Code to our GUID register so it's easy to figure
+ * out in which kernel version a bug was found.
+ */
+ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
+ /* Handle USB2.0-only core configuration */
+ if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err0;
+
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
+ dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
ret = dwc3_setup_scratch_buffers(dwc);
@@ -740,21 +801,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- break;
- case USB_DR_MODE_HOST:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
- break;
- case USB_DR_MODE_OTG:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
- break;
- default:
- dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
- break;
- }
-
/*
* ENDXFER polling is available on version 3.10a and later of
* the DWC_usb3 controller. It is NOT available in the
@@ -766,6 +812,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * Enable hardware control of sending remote wakeup in HS when
+ * the device is in the L1 state.
+ */
+ if (dwc->revision >= DWC3_REVISION_290A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
return 0;
err4:
@@ -862,6 +918,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -870,6 +927,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -878,19 +936,38 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_OTG:
- ret = dwc3_host_init(dwc);
+ #ifndef CONFIG_USB_DWC3_HISI
+ INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
+ ret = dwc3_drd_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize dual-role\n");
+ return ret;
+ }
+ #else
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+
+ ret = dwc3_otg_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize otg\n");
+ return ret;
+ }
+
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ dwc3_otg_exit(dwc);
return ret;
}
ret = dwc3_gadget_init(dwc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize gadget\n");
+ dev_err(dev, "failed to initialize gadget\n");
+ dwc3_host_exit(dwc);
+ dwc3_otg_exit(dwc);
return ret;
}
+ #endif
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -910,8 +987,8 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
dwc3_host_exit(dwc);
break;
case USB_DR_MODE_OTG:
- dwc3_host_exit(dwc);
- dwc3_gadget_exit(dwc);
+ dwc3_drd_exit(dwc);
+ dwc3_otg_exit(dwc);
break;
default:
/* do nothing */
@@ -919,57 +996,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
}
-#define DWC3_ALIGN_MASK (16 - 1)
-
-static int dwc3_probe(struct platform_device *pdev)
+static void dwc3_get_properties(struct dwc3 *dwc)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct dwc3 *dwc;
+ struct device *dev = dwc->dev;
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- int ret;
-
- void __iomem *regs;
- void *mem;
-
- mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
- dwc->mem = mem;
- dwc->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- return -ENODEV;
- }
-
- dwc->xhci_resources[0].start = res->start;
- dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
- DWC3_XHCI_REGS_END;
- dwc->xhci_resources[0].flags = res->flags;
- dwc->xhci_resources[0].name = res->name;
-
- res->start += DWC3_GLOBALS_REGS_START;
-
- /*
- * Request memory region but exclude xHCI regs,
- * since it will be requested by the xhci-plat driver.
- */
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto err0;
- }
-
- dwc->regs = regs;
- dwc->regs_size = resource_size(res);
-
/* default to highest possible threshold */
lpm_nyet_threshold = 0xff;
@@ -986,6 +1019,13 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
+ dwc->sysdev_is_parent = device_property_read_bool(dev,
+ "linux,sysdev_is_parent");
+ if (dwc->sysdev_is_parent)
+ dwc->sysdev = dwc->dev->parent;
+ else
+ dwc->sysdev = dwc->dev;
+
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1041,6 +1081,112 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
+ dwc->imod_interval = 0;
+}
+
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return ((dwc3_is_usb3(dwc) &&
+ dwc->revision >= DWC3_REVISION_300A) ||
+ (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
+static void dwc3_check_params(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+
+ /* Check for proper value of imod_interval */
+ if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+ dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+ dwc->imod_interval = 0;
+ }
+
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval &&
+ (dwc->revision == DWC3_REVISION_300A))
+ dwc->imod_interval = 1;
+
+ /* Check the maximum_speed parameter */
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(dev, "invalid maximum_speed parameter %d\n",
+ dwc->maximum_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ dwc->maximum_speed = USB_SPEED_SUPER;
+
+ /*
+ * default to superspeed plus if we are capable.
+ */
+ if (dwc3_is_usb31(dwc) &&
+ (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+
+ break;
+ }
+}
+
+static int dwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct dwc3 *dwc;
+
+ int ret;
+
+ void __iomem *regs;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ dwc->xhci_resources[0].start = res->start;
+ dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+ DWC3_XHCI_REGS_END;
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
+ res->start += DWC3_GLOBALS_REGS_START;
+
+ /*
+ * Request memory region but exclude xHCI regs,
+ * since it will be requested by the xhci-plat driver.
+ */
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err0;
+ }
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(res);
+
+ dwc3_get_properties(dwc);
+
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
@@ -1050,12 +1196,6 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
- if (!dev->dma_mask) {
- dev->dma_mask = dev->parent->dma_mask;
- dev->dma_parms = dev->parent->dma_parms;
- dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
- }
-
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1087,32 +1227,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err4;
}
- /* Check the maximum_speed parameter */
- switch (dwc->maximum_speed) {
- case USB_SPEED_LOW:
- case USB_SPEED_FULL:
- case USB_SPEED_HIGH:
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- break;
- default:
- dev_err(dev, "invalid maximum_speed parameter %d\n",
- dwc->maximum_speed);
- /* fall through */
- case USB_SPEED_UNKNOWN:
- /* default to superspeed */
- dwc->maximum_speed = USB_SPEED_SUPER;
-
- /*
- * default to superspeed plus if we are capable.
- */
- if (dwc3_is_usb31(dwc) &&
- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
- dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
-
- break;
- }
+ dwc3_check_params(dwc);
ret = dwc3_core_init_mode(dwc);
if (ret)
@@ -1233,8 +1348,10 @@ static int dwc3_runtime_checks(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
+#ifndef CONFIG_USB_DWC3_HISI
if (dwc->connected)
return -EBUSY;
+#endif
break;
case USB_DR_MODE_HOST:
default:
@@ -1259,6 +1376,7 @@ static int dwc3_runtime_suspend(struct device *dev)
device_init_wakeup(dev, true);
+ pm_runtime_put(dev);
return 0;
}
@@ -1285,7 +1403,7 @@ static int dwc3_runtime_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
- pm_runtime_put(dev);
+ pm_runtime_get(dev);
return 0;
}
@@ -1350,9 +1468,34 @@ static int dwc3_resume(struct device *dev)
static const struct dev_pm_ops dwc3_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
- dwc3_runtime_idle)
+ dwc3_runtime_idle)
};
+int dwc3_resume_device(struct dwc3 *dwc)
+{
+ int status;
+
+ pr_info("[dwc3_resume_device] +\n");
+ status = dwc3_runtime_resume(dwc->dev);
+ if (status < 0) {
+ pr_err("dwc3_runtime_resume err, status:%d\n", status);
+ }
+ pr_info("[dwc3_resume_device] -\n");
+ return status;
+}
+
+void dwc3_suspend_device(struct dwc3 *dwc)
+{
+ int status;
+
+ pr_info("[dwc3_suspend_device] +\n");
+ status = dwc3_runtime_suspend(dwc->dev);
+ if (status < 0) {
+ pr_err("dwc3_runtime_suspend err, status:%d\n", status);
+ }
+ pr_info("[dwc3_suspend_device] -\n");
+}
+
#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
{
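
With the rework above, dwc3_set_mode() no longer writes PRTCAP directly; it records the desired role and defers the actual switch to drd_work/__dwc3_set_mode(). A hypothetical caller (not part of this series) reacting to an ID-pin extcon event could look like this:

static int example_id_notifier(struct notifier_block *nb,
			       unsigned long id_state, void *ptr)
{
	/* Hypothetical sketch built only on symbols declared in this patch. */
	struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);

	/* Record the role; __dwc3_set_mode() does the teardown/re-init later. */
	dwc3_set_mode(dwc, id_state ? DWC3_GCTL_PRTCAP_HOST
				    : DWC3_GCTL_PRTCAP_DEVICE);

	return NOTIFY_DONE;
}

The debugfs hunk further below uses the same entry point, which is why it can drop the explicit locking around the mode write.
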
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 884c43714456..fba5ed56ab89 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -23,9 +23,12 @@
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/list.h>
+#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -37,8 +40,9 @@
#define DWC3_MSG_MAX 500
/* Global constants */
-#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */
-#define DWC3_EP0_BOUNCE_SIZE 512
+#define DWC3_PULL_UP_TIMEOUT 500 /* ms */
+#define DWC3_BOUNCE_SIZE 1024 /* size of a superspeed bulk */
+#define DWC3_EP0_SETUP_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
@@ -63,6 +67,7 @@
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB BIT(31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -112,20 +117,20 @@
#define DWC3_VER_NUMBER 0xc1a0
#define DWC3_VER_TYPE 0xc1a4
-#define DWC3_GUSB2PHYCFG(n) (0xc200 + (n * 0x04))
-#define DWC3_GUSB2I2CCTL(n) (0xc240 + (n * 0x04))
+#define DWC3_GUSB2PHYCFG(n) (0xc200 + ((n) * 0x04))
+#define DWC3_GUSB2I2CCTL(n) (0xc240 + ((n) * 0x04))
-#define DWC3_GUSB2PHYACC(n) (0xc280 + (n * 0x04))
+#define DWC3_GUSB2PHYACC(n) (0xc280 + ((n) * 0x04))
-#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + (n * 0x04))
+#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + ((n) * 0x04))
-#define DWC3_GTXFIFOSIZ(n) (0xc300 + (n * 0x04))
-#define DWC3_GRXFIFOSIZ(n) (0xc380 + (n * 0x04))
+#define DWC3_GTXFIFOSIZ(n) (0xc300 + ((n) * 0x04))
+#define DWC3_GRXFIFOSIZ(n) (0xc380 + ((n) * 0x04))
-#define DWC3_GEVNTADRLO(n) (0xc400 + (n * 0x10))
-#define DWC3_GEVNTADRHI(n) (0xc404 + (n * 0x10))
-#define DWC3_GEVNTSIZ(n) (0xc408 + (n * 0x10))
-#define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10))
+#define DWC3_GEVNTADRLO(n) (0xc400 + ((n) * 0x10))
+#define DWC3_GEVNTADRHI(n) (0xc404 + ((n) * 0x10))
+#define DWC3_GEVNTSIZ(n) (0xc408 + ((n) * 0x10))
+#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
#define DWC3_GHWPARAMS8 0xc600
#define DWC3_GFLADJ 0xc630
@@ -139,12 +144,14 @@
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
-#define DWC3_DEP_BASE(n) (0xc800 + (n * 0x10))
+#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10))
#define DWC3_DEPCMDPAR2 0x00
#define DWC3_DEPCMDPAR1 0x04
#define DWC3_DEPCMDPAR0 0x08
#define DWC3_DEPCMD 0x0c
+#define DWC3_DEV_IMOD(n) (0xca00 + ((n) * 0x4))
+
/* OTG Registers */
#define DWC3_OCFG 0xcc00
#define DWC3_OCTL 0xcc04
@@ -170,11 +177,11 @@
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
-#define DWC3_GRXTHRCFG_PKTCNTSEL (1 << 29)
+#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
/* Global Configuration Register */
#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
-#define DWC3_GCTL_U2RSTECN (1 << 16)
+#define DWC3_GCTL_U2RSTECN BIT(16)
#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6)
#define DWC3_GCTL_CLK_BUS (0)
#define DWC3_GCTL_CLK_PIPE (1)
@@ -187,21 +194,24 @@
#define DWC3_GCTL_PRTCAP_DEVICE 2
#define DWC3_GCTL_PRTCAP_OTG 3
-#define DWC3_GCTL_CORESOFTRESET (1 << 11)
-#define DWC3_GCTL_SOFITPSYNC (1 << 10)
+#define DWC3_GCTL_CORESOFTRESET BIT(11)
+#define DWC3_GCTL_SOFITPSYNC BIT(10)
#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
-#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
-#define DWC3_GCTL_U2EXIT_LFPS (1 << 2)
-#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
-#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+#define DWC3_GCTL_DISSCRAMBLE BIT(3)
+#define DWC3_GCTL_U2EXIT_LFPS BIT(2)
+#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
+#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
+
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
/* Global USB2 PHY Configuration Register */
-#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
-#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30)
-#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
-#define DWC3_GUSB2PHYCFG_ULPI_UTMI (1 << 4)
-#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
+#define DWC3_GUSB2PHYCFG_PHYSOFTRST BIT(31)
+#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS BIT(30)
+#define DWC3_GUSB2PHYCFG_SUSPHY BIT(6)
+#define DWC3_GUSB2PHYCFG_ULPI_UTMI BIT(4)
+#define DWC3_GUSB2PHYCFG_ENBLSLPM BIT(8)
#define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3)
#define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1)
#define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10)
@@ -212,25 +222,26 @@
#define UTMI_PHYIF_8_BIT 0
/* Global USB2 PHY Vendor Control Register */
-#define DWC3_GUSB2PHYACC_NEWREGREQ (1 << 25)
-#define DWC3_GUSB2PHYACC_BUSY (1 << 23)
-#define DWC3_GUSB2PHYACC_WRITE (1 << 22)
+#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_BUSY BIT(23)
+#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
#define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8)
#define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff)
/* Global USB3 PIPE Control Register */
-#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
-#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
-#define DWC3_GUSB3PIPECTL_DISRXDETINP3 (1 << 28)
-#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
+#define DWC3_GUSB3PIPECTL_PHYSOFTRST BIT(31)
+#define DWC3_GUSB3PIPECTL_U2SSINP3OK BIT(29)
+#define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27)
+#define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1)
-#define DWC3_GUSB3PIPECTL_DEPOCHANGE (1 << 18)
-#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17)
-#define DWC3_GUSB3PIPECTL_LFPSFILT (1 << 9)
-#define DWC3_GUSB3PIPECTL_RX_DETOPOLL (1 << 8)
+#define DWC3_GUSB3PIPECTL_DEPOCHANGE BIT(18)
+#define DWC3_GUSB3PIPECTL_SUSPHY BIT(17)
+#define DWC3_GUSB3PIPECTL_LFPSFILT BIT(9)
+#define DWC3_GUSB3PIPECTL_RX_DETOPOLL BIT(8)
#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3)
#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
@@ -239,7 +250,7 @@
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
/* Global Event Size Registers */
-#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
+#define DWC3_GEVNTSIZ_INTMASK BIT(31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
/* Global HWPARAMS0 Register */
@@ -280,18 +291,18 @@
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
/* Global HWPARAMS6 Register */
-#define DWC3_GHWPARAMS6_EN_FPGA (1 << 7)
+#define DWC3_GHWPARAMS6_EN_FPGA BIT(7)
/* Global HWPARAMS7 Register */
#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
/* Global Frame Length Adjustment Register */
-#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
+#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
/* Global User Control Register 2 */
-#define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14)
+#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
@@ -301,23 +312,23 @@
#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DCFG_SUPERSPEED (4 << 0)
#define DWC3_DCFG_HIGHSPEED (0 << 0)
-#define DWC3_DCFG_FULLSPEED (1 << 0)
+#define DWC3_DCFG_FULLSPEED BIT(0)
#define DWC3_DCFG_LOWSPEED (2 << 0)
#define DWC3_DCFG_NUMP_SHIFT 17
#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
#define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT)
-#define DWC3_DCFG_LPM_CAP (1 << 22)
+#define DWC3_DCFG_LPM_CAP BIT(22)
/* Device Control Register */
-#define DWC3_DCTL_RUN_STOP (1 << 31)
-#define DWC3_DCTL_CSFTRST (1 << 30)
-#define DWC3_DCTL_LSFTRST (1 << 29)
+#define DWC3_DCTL_RUN_STOP BIT(31)
+#define DWC3_DCTL_CSFTRST BIT(30)
+#define DWC3_DCTL_LSFTRST BIT(29)
#define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24)
#define DWC3_DCTL_HIRD_THRES(n) ((n) << 24)
-#define DWC3_DCTL_APPL1RES (1 << 23)
+#define DWC3_DCTL_APPL1RES BIT(23)
/* These apply for core versions 1.87a and earlier */
#define DWC3_DCTL_TRGTULST_MASK (0x0f << 17)
@@ -332,15 +343,15 @@
#define DWC3_DCTL_LPM_ERRATA_MASK DWC3_DCTL_LPM_ERRATA(0xf)
#define DWC3_DCTL_LPM_ERRATA(n) ((n) << 20)
-#define DWC3_DCTL_KEEP_CONNECT (1 << 19)
-#define DWC3_DCTL_L1_HIBER_EN (1 << 18)
-#define DWC3_DCTL_CRS (1 << 17)
-#define DWC3_DCTL_CSS (1 << 16)
+#define DWC3_DCTL_KEEP_CONNECT BIT(19)
+#define DWC3_DCTL_L1_HIBER_EN BIT(18)
+#define DWC3_DCTL_CRS BIT(17)
+#define DWC3_DCTL_CSS BIT(16)
-#define DWC3_DCTL_INITU2ENA (1 << 12)
-#define DWC3_DCTL_ACCEPTU2ENA (1 << 11)
-#define DWC3_DCTL_INITU1ENA (1 << 10)
-#define DWC3_DCTL_ACCEPTU1ENA (1 << 9)
+#define DWC3_DCTL_INITU2ENA BIT(12)
+#define DWC3_DCTL_ACCEPTU2ENA BIT(11)
+#define DWC3_DCTL_INITU1ENA BIT(10)
+#define DWC3_DCTL_ACCEPTU1ENA BIT(9)
#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5)
@@ -355,36 +366,36 @@
#define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11))
/* Device Event Enable Register */
-#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN (1 << 12)
-#define DWC3_DEVTEN_EVNTOVERFLOWEN (1 << 11)
-#define DWC3_DEVTEN_CMDCMPLTEN (1 << 10)
-#define DWC3_DEVTEN_ERRTICERREN (1 << 9)
-#define DWC3_DEVTEN_SOFEN (1 << 7)
-#define DWC3_DEVTEN_EOPFEN (1 << 6)
-#define DWC3_DEVTEN_HIBERNATIONREQEVTEN (1 << 5)
-#define DWC3_DEVTEN_WKUPEVTEN (1 << 4)
-#define DWC3_DEVTEN_ULSTCNGEN (1 << 3)
-#define DWC3_DEVTEN_CONNECTDONEEN (1 << 2)
-#define DWC3_DEVTEN_USBRSTEN (1 << 1)
-#define DWC3_DEVTEN_DISCONNEVTEN (1 << 0)
+#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN BIT(12)
+#define DWC3_DEVTEN_EVNTOVERFLOWEN BIT(11)
+#define DWC3_DEVTEN_CMDCMPLTEN BIT(10)
+#define DWC3_DEVTEN_ERRTICERREN BIT(9)
+#define DWC3_DEVTEN_SOFEN BIT(7)
+#define DWC3_DEVTEN_EOPFEN BIT(6)
+#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5)
+#define DWC3_DEVTEN_WKUPEVTEN BIT(4)
+#define DWC3_DEVTEN_ULSTCNGEN BIT(3)
+#define DWC3_DEVTEN_CONNECTDONEEN BIT(2)
+#define DWC3_DEVTEN_USBRSTEN BIT(1)
+#define DWC3_DEVTEN_DISCONNEVTEN BIT(0)
/* Device Status Register */
-#define DWC3_DSTS_DCNRD (1 << 29)
+#define DWC3_DSTS_DCNRD BIT(29)
/* This applies for core versions 1.87a and earlier */
-#define DWC3_DSTS_PWRUPREQ (1 << 24)
+#define DWC3_DSTS_PWRUPREQ BIT(24)
/* These apply for core versions 1.94a and later */
-#define DWC3_DSTS_RSS (1 << 25)
-#define DWC3_DSTS_SSS (1 << 24)
+#define DWC3_DSTS_RSS BIT(25)
+#define DWC3_DSTS_SSS BIT(24)
-#define DWC3_DSTS_COREIDLE (1 << 23)
-#define DWC3_DSTS_DEVCTRLHLT (1 << 22)
+#define DWC3_DSTS_COREIDLE BIT(23)
+#define DWC3_DSTS_DEVCTRLHLT BIT(22)
#define DWC3_DSTS_USBLNKST_MASK (0x0f << 18)
#define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18)
-#define DWC3_DSTS_RXFIFOEMPTY (1 << 17)
+#define DWC3_DSTS_RXFIFOEMPTY BIT(17)
#define DWC3_DSTS_SOFFN_MASK (0x3fff << 3)
#define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3)
@@ -394,7 +405,7 @@
#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DSTS_SUPERSPEED (4 << 0)
#define DWC3_DSTS_HIGHSPEED (0 << 0)
-#define DWC3_DSTS_FULLSPEED (1 << 0)
+#define DWC3_DSTS_FULLSPEED BIT(0)
#define DWC3_DSTS_LOWSPEED (2 << 0)
/* Device Generic Command Register */
@@ -412,26 +423,26 @@
#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
-#define DWC3_DGCMD_CMDACT (1 << 10)
-#define DWC3_DGCMD_CMDIOC (1 << 8)
+#define DWC3_DGCMD_CMDACT BIT(10)
+#define DWC3_DGCMD_CMDIOC BIT(8)
/* Device Generic Command Parameter Register */
-#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT (1 << 0)
+#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT BIT(0)
#define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0)
#define DWC3_DGCMDPAR_RX_FIFO (0 << 5)
-#define DWC3_DGCMDPAR_TX_FIFO (1 << 5)
+#define DWC3_DGCMDPAR_TX_FIFO BIT(5)
#define DWC3_DGCMDPAR_LOOPBACK_DIS (0 << 0)
-#define DWC3_DGCMDPAR_LOOPBACK_ENA (1 << 0)
+#define DWC3_DGCMDPAR_LOOPBACK_ENA BIT(0)
/* Device Endpoint Command Register */
#define DWC3_DEPCMD_PARAM_SHIFT 16
#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
-#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
-#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
-#define DWC3_DEPCMD_CMDACT (1 << 10)
-#define DWC3_DEPCMD_CMDIOC (1 << 8)
+#define DWC3_DEPCMD_HIPRI_FORCERM BIT(11)
+#define DWC3_DEPCMD_CLEARPENDIN BIT(11)
+#define DWC3_DEPCMD_CMDACT BIT(10)
+#define DWC3_DEPCMD_CMDIOC BIT(8)
#define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0)
#define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0)
@@ -446,14 +457,21 @@
#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
+
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
-#define DWC3_DALEPENA_EP(n) (1 << n)
+#define DWC3_DALEPENA_EP(n) BIT(n)
#define DWC3_DEPCMD_TYPE_CONTROL 0
#define DWC3_DEPCMD_TYPE_ISOC 1
#define DWC3_DEPCMD_TYPE_BULK 2
#define DWC3_DEPCMD_TYPE_INTR 3
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
/* Structures */
struct dwc3_trb;
@@ -461,6 +479,7 @@ struct dwc3_trb;
/**
* struct dwc3_event_buffer - Software event buffer representation
* @buf: _THE_ buffer
+ * @cache: The buffer cache used in the threaded interrupt
* @length: size of this buffer
* @lpos: event offset
* @count: cache of last read event count register
@@ -470,6 +489,7 @@ struct dwc3_trb;
*/
struct dwc3_event_buffer {
void *buf;
+ void *cache;
unsigned length;
unsigned int lpos;
unsigned int count;
@@ -482,8 +502,8 @@ struct dwc3_event_buffer {
struct dwc3 *dwc;
};
-#define DWC3_EP_FLAG_STALLED (1 << 0)
-#define DWC3_EP_FLAG_WEDGED (1 << 1)
+#define DWC3_EP_FLAG_STALLED BIT(0)
+#define DWC3_EP_FLAG_WEDGED BIT(1)
#define DWC3_EP_DIRECTION_TX true
#define DWC3_EP_DIRECTION_RX false
@@ -495,6 +515,7 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
+ * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete
* @lock: spinlock for endpoint request queue traversal
* @regs: pointer to first endpoint register
* @trb_pool: array of transaction buffers
@@ -520,25 +541,28 @@ struct dwc3_ep {
struct list_head pending_list;
struct list_head started_list;
+ wait_queue_head_t wait_end_transfer;
+
spinlock_t lock;
void __iomem *regs;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
- const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
u32 saved_state;
unsigned flags;
-#define DWC3_EP_ENABLED (1 << 0)
-#define DWC3_EP_STALL (1 << 1)
-#define DWC3_EP_WEDGE (1 << 2)
-#define DWC3_EP_BUSY (1 << 4)
-#define DWC3_EP_PENDING_REQUEST (1 << 5)
-#define DWC3_EP_MISSED_ISOC (1 << 6)
+#define DWC3_EP_ENABLED BIT(0)
+#define DWC3_EP_STALL BIT(1)
+#define DWC3_EP_WEDGE BIT(2)
+#define DWC3_EP_BUSY BIT(4)
+#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_MISSED_ISOC BIT(6)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(7)
+#define DWC3_EP_TRANSFER_STARTED BIT(8)
/* This last one is specific to EP0 */
-#define DWC3_EP0_DIR_IN (1 << 31)
+#define DWC3_EP0_DIR_IN BIT(31)
/*
* IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will
@@ -616,13 +640,13 @@ enum dwc3_link_state {
#define DWC3_TRB_STS_XFER_IN_PROG 4
/* TRB Control */
-#define DWC3_TRB_CTRL_HWO (1 << 0)
-#define DWC3_TRB_CTRL_LST (1 << 1)
-#define DWC3_TRB_CTRL_CHN (1 << 2)
-#define DWC3_TRB_CTRL_CSP (1 << 3)
+#define DWC3_TRB_CTRL_HWO BIT(0)
+#define DWC3_TRB_CTRL_LST BIT(1)
+#define DWC3_TRB_CTRL_CHN BIT(2)
+#define DWC3_TRB_CTRL_CSP BIT(3)
#define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4)
-#define DWC3_TRB_CTRL_ISP_IMI (1 << 10)
-#define DWC3_TRB_CTRL_IOC (1 << 11)
+#define DWC3_TRB_CTRL_ISP_IMI BIT(10)
+#define DWC3_TRB_CTRL_IOC BIT(11)
#define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14)
#define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4))
@@ -699,10 +723,11 @@ struct dwc3_hwparams {
* @dep: struct dwc3_ep owning this request
* @sg: pointer to first incomplete sg
* @num_pending_sgs: counter to pending sgs
- * @first_trb_index: index to first trb used by this request
+ * @remaining: amount of data remaining
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
+ * @unaligned: true for OUT endpoints with length not divisible by maxp
* @direction: IN or OUT direction flag
* @mapped: true when request has been dma-mapped
* @queued: true when request has been queued to HW
@@ -714,14 +739,17 @@ struct dwc3_request {
struct scatterlist *sg;
unsigned num_pending_sgs;
- u8 first_trb_index;
+ unsigned remaining;
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
+ unsigned unaligned:1;
unsigned direction:1;
unsigned mapped:1;
unsigned started:1;
+ unsigned zero:1;
+ unsigned send_zlp:1;
};
/*
@@ -732,18 +760,17 @@ struct dwc3_scratchpad_array {
__le64 dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS];
};
+struct dwc3_otg;
+
/**
* struct dwc3 - representation of our controller
- * @ctrl_req: usb control request which is used for ep0
+ * @drd_work: work item used for role swapping
* @ep0_trb: trb which is used for the ctrl_req
- * @ep0_bounce: bounce buffer for ep0
- * @zlp_buf: used when request->zero is set
* @setup_buf: used while processing STD USB requests
- * @ctrl_req_addr: dma address of ctrl_req
* @ep0_trb: dma address of ep0_trb
* @ep0_usb_req: dummy req used while handling STD USB requests
- * @ep0_bounce_addr: dma address of ep0_bounce
* @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: completed once a control transfer finishes and ep0 is back in the setup phase
* @lock: for synchronizing
* @dev: pointer to our struct device
* @xhci: pointer to our xHCI child
@@ -759,6 +786,10 @@ struct dwc3_scratchpad_array {
* @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
* @dr_mode: requested mode of operation
+ * @current_dr_role: current role of operation when in dual-role mode
+ * @desired_dr_role: desired role of operation when in dual-role mode
+ * @edev: extcon handle
+ * @edev_nb: extcon notifier
* @hsphy_mode: UTMI phy mode, one of following:
* - USBPHY_INTERFACE_MODE_UTMI
* - USBPHY_INTERFACE_MODE_UTMIW
@@ -774,13 +805,11 @@ struct dwc3_scratchpad_array {
* @u2pel: parameter from Set SEL request.
* @u1sel: parameter from Set SEL request.
* @u1pel: parameter from Set SEL request.
- * @num_out_eps: number of out endpoints
- * @num_in_eps: number of in endpoints
+ * @num_eps: number of endpoints
* @ep0_next_event: hold the next expected event
* @ep0state: state of endpoint zero
* @link_state: link state
* @speed: device speed (super, high, full, low)
- * @mem: points to start of memory which is used for this struct.
* @hwparams: copy of hwparams registers
* @root: debugfs root folder pointer
* @regset: debugfs pointer to regdump file
@@ -794,6 +823,7 @@ struct dwc3_scratchpad_array {
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when dwc3 device has a parent driver
* @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
* there's no way for software to detect this at runtime.
* @is_utmi_l1_suspend: the core asserts output signal
@@ -829,24 +859,26 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
*/
struct dwc3 {
- struct usb_ctrlrequest *ctrl_req;
+ struct work_struct drd_work;
struct dwc3_trb *ep0_trb;
- void *ep0_bounce;
- void *zlp_buf;
+ void *bounce;
void *scratchbuf;
u8 *setup_buf;
- dma_addr_t ctrl_req_addr;
dma_addr_t ep0_trb_addr;
- dma_addr_t ep0_bounce_addr;
+ dma_addr_t bounce_addr;
dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
+ struct completion ep0_in_setup;
/* device lock */
spinlock_t lock;
struct device *dev;
+ struct device *sysdev;
struct platform_device *xhci;
struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -869,6 +901,10 @@ struct dwc3 {
size_t regs_size;
enum usb_dr_mode dr_mode;
+ u32 current_dr_role;
+ u32 desired_dr_role;
+ struct extcon_dev *edev;
+ struct notifier_block edev_nb;
enum usb_phy_interface hsphy_mode;
u32 fladj;
@@ -905,6 +941,7 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
@@ -914,6 +951,7 @@ struct dwc3 {
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -927,10 +965,7 @@ struct dwc3 {
u8 speed;
- u8 num_out_eps;
- u8 num_in_eps;
-
- void *mem;
+ u8 num_eps;
struct dwc3_hwparams hwparams;
struct dentry *root;
@@ -941,13 +976,21 @@ struct dwc3 {
u8 lpm_nyet_threshold;
u8 hird_threshold;
+ struct dwc3_otg *dwc_otg;
const char *hsphy_interface;
unsigned connected:1;
unsigned delayed_status:1;
+
+ /*
+ * The delayed status may arrive before the XferNotReady interrupt;
+ * in that case, don't wait for the delayed status.
+ */
+ unsigned status_queued:1;
+
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned has_hibernation:1;
+ unsigned sysdev_is_parent:1;
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
@@ -974,9 +1017,11 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+
+ u16 imod_interval;
};
-/* -------------------------------------------------------------------------- */
+#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work))
/* -------------------------------------------------------------------------- */
@@ -1020,13 +1065,13 @@ struct dwc3_event_depevt {
u32 status:4;
/* Within XferNotReady */
-#define DEPEVT_STATUS_TRANSFER_ACTIVE (1 << 3)
+#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3)
/* Within XferComplete */
-#define DEPEVT_STATUS_BUSERR (1 << 0)
-#define DEPEVT_STATUS_SHORT (1 << 1)
-#define DEPEVT_STATUS_IOC (1 << 2)
-#define DEPEVT_STATUS_LST (1 << 3)
+#define DEPEVT_STATUS_BUSERR BIT(0)
+#define DEPEVT_STATUS_SHORT BIT(1)
+#define DEPEVT_STATUS_IOC BIT(2)
+#define DEPEVT_STATUS_LST BIT(3)
/* Stream event only */
#define DEPEVT_STREAMEVT_FOUND 1
@@ -1035,12 +1080,16 @@ struct dwc3_event_depevt {
/* Control-only Status */
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
+#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3)
/* In response to Start Transfer */
#define DEPEVT_TRANSFER_NO_RESOURCE 1
#define DEPEVT_TRANSFER_BUS_EXPIRY 2
u32 parameters:16;
+
+/* For Command Complete Events */
+#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8)
} __packed;
/**
@@ -1128,6 +1177,12 @@ struct dwc3_gadget_ep_cmd_params {
/* prototypes */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+ return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
/* check whether we are on the DWC_usb31 core */
static inline bool dwc3_is_usb31(struct dwc3 *dwc)
@@ -1135,6 +1190,8 @@ static inline bool dwc3_is_usb31(struct dwc3 *dwc)
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
+bool dwc3_has_imod(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
@@ -1154,6 +1211,8 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+int dwc3_conndone_notifier_register(struct notifier_block *nb);
+int dwc3_conndone_notifier_unregister(struct notifier_block *nb);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1173,6 +1232,20 @@ static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
int cmd, u32 param)
{ return 0; }
+static inline int dwc3_conndone_notifier_register(struct notifier_block *nb)
+{ return 0; }
+static inline int dwc3_conndone_notifier_unregister(struct notifier_block *nb)
+{ return 0; }
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_drd_init(struct dwc3 *dwc);
+void dwc3_drd_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_drd_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_drd_exit(struct dwc3 *dwc)
+{ }
#endif
/* power management interface */
@@ -1196,6 +1269,9 @@ static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
}
#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
+int dwc3_resume_device(struct dwc3 *dwc);
+void dwc3_suspend_device(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
int dwc3_ulpi_init(struct dwc3 *dwc);
void dwc3_ulpi_exit(struct dwc3 *dwc);
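
The new DWC3_DEV_IMOD register macro, the field masks, and the imod_interval parameter are consumed by the event-handling code, which is not part of this excerpt. A rough illustration of how the pieces are assumed to fit together (hypothetical helper):

/* Hypothetical helper -- the real programming lives in the gadget IRQ path. */
static void example_program_imod(struct dwc3 *dwc)
{
	u32 reg;

	if (!dwc3_has_imod(dwc))
		return;

	/* imod_interval is in 250 ns increments; 0 leaves moderation off. */
	reg = dwc->imod_interval & DWC3_DEV_IMOD_INTERVAL_MASK;
	dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), reg);
}
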
diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c
deleted file mode 100644
index 0be6885bc370..000000000000
--- a/drivers/usb/dwc3/debug.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support
- *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "debug.h"
-
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- trace(&vaf);
-
- va_end(args);
-}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 33ab2a203c1b..cb2d8d3f7f3d 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -125,6 +125,50 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
}
/**
+ * dwc3_trb_type_string - returns TRB type as a string
+ * @type: the type of the TRB
+ */
+static inline const char *dwc3_trb_type_string(unsigned int type)
+{
+ switch (type) {
+ case DWC3_TRBCTL_NORMAL:
+ return "normal";
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ return "setup";
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ return "status2";
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ return "status3";
+ case DWC3_TRBCTL_CONTROL_DATA:
+ return "data";
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ return "isoc-first";
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ return "isoc";
+ case DWC3_TRBCTL_LINK_TRB:
+ return "link";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+ switch (state) {
+ case EP0_UNCONNECTED:
+ return "Unconnected";
+ case EP0_SETUP_PHASE:
+ return "Setup Phase";
+ case EP0_DATA_PHASE:
+ return "Data Phase";
+ case EP0_STATUS_PHASE:
+ return "Status Phase";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
* dwc3_gadget_event_string - returns event name
* @event: the event code
*/
@@ -184,10 +228,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event)
* @event: the event code
*/
static inline const char *
-dwc3_ep_event_string(const struct dwc3_event_depevt *event)
+dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state)
{
u8 epnum = event->endpoint_number;
static char str[256];
+ size_t len;
int status;
int ret;
@@ -199,6 +244,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
strcat(str, "Transfer Complete");
+ len = strlen(str);
+
+ if (epnum <= 1)
+ sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
strcat(str, "Transfer In-Progress");
@@ -207,6 +256,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
strcat(str, "Transfer Not Ready");
status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
strcat(str, status ? " (Active)" : " (Not Active)");
+
+ /* Control Endpoints */
+ if (epnum <= 1) {
+ int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+ switch (phase) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+ strcat(str, " [Data Phase]");
+ break;
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ strcat(str, " [Status Phase]");
+ }
+ }
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
strcat(str, "FIFO");
@@ -270,14 +332,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
-static inline const char *dwc3_decode_event(u32 event)
+static inline const char *dwc3_decode_event(u32 event, u32 ep0state)
{
const union dwc3_event evt = (union dwc3_event) event;
if (evt.type.is_devspec)
return dwc3_gadget_event_string(&evt.devt);
else
- return dwc3_ep_event_string(&evt.depevt);
+ return dwc3_ep_event_string(&evt.depevt, ep0state);
}
static inline const char *dwc3_ep_cmd_status_string(int status)
@@ -310,7 +372,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
}
}
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 31926dda43c9..7be963dd8e3b 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -300,7 +300,7 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "device\n");
break;
case DWC3_GCTL_PRTCAP_OTG:
- seq_printf(s, "OTG\n");
+ seq_printf(s, "otg\n");
break;
default:
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
@@ -319,7 +319,6 @@ static ssize_t dwc3_mode_write(struct file *file,
{
struct seq_file *s = file->private_data;
struct dwc3 *dwc = s->private;
- unsigned long flags;
u32 mode = 0;
char buf[32];
@@ -327,19 +326,16 @@ static ssize_t dwc3_mode_write(struct file *file,
return -EFAULT;
if (!strncmp(buf, "host", 4))
- mode |= DWC3_GCTL_PRTCAP_HOST;
+ mode = DWC3_GCTL_PRTCAP_HOST;
if (!strncmp(buf, "device", 6))
- mode |= DWC3_GCTL_PRTCAP_DEVICE;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
if (!strncmp(buf, "otg", 3))
- mode |= DWC3_GCTL_PRTCAP_OTG;
+ mode = DWC3_GCTL_PRTCAP_OTG;
+
+ dwc3_set_mode(dwc, mode);
- if (mode) {
- spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_mode(dwc, mode);
- spin_unlock_irqrestore(&dwc->lock, flags);
- }
return count;
}
@@ -446,52 +442,7 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
state = DWC3_DSTS_USBLNKST(reg);
spin_unlock_irqrestore(&dwc->lock, flags);
- switch (state) {
- case DWC3_LINK_STATE_U0:
- seq_printf(s, "U0\n");
- break;
- case DWC3_LINK_STATE_U1:
- seq_printf(s, "U1\n");
- break;
- case DWC3_LINK_STATE_U2:
- seq_printf(s, "U2\n");
- break;
- case DWC3_LINK_STATE_U3:
- seq_printf(s, "U3\n");
- break;
- case DWC3_LINK_STATE_SS_DIS:
- seq_printf(s, "SS.Disabled\n");
- break;
- case DWC3_LINK_STATE_RX_DET:
- seq_printf(s, "Rx.Detect\n");
- break;
- case DWC3_LINK_STATE_SS_INACT:
- seq_printf(s, "SS.Inactive\n");
- break;
- case DWC3_LINK_STATE_POLL:
- seq_printf(s, "Poll\n");
- break;
- case DWC3_LINK_STATE_RECOV:
- seq_printf(s, "Recovery\n");
- break;
- case DWC3_LINK_STATE_HRESET:
- seq_printf(s, "HRESET\n");
- break;
- case DWC3_LINK_STATE_CMPLY:
- seq_printf(s, "Compliance\n");
- break;
- case DWC3_LINK_STATE_LPBK:
- seq_printf(s, "Loopback\n");
- break;
- case DWC3_LINK_STATE_RESET:
- seq_printf(s, "Reset\n");
- break;
- case DWC3_LINK_STATE_RESUME:
- seq_printf(s, "Resume\n");
- break;
- default:
- seq_printf(s, "UNKNOWN %d\n", state);
- }
+ seq_printf(s, "%s\n", dwc3_gadget_link_string(state));
return 0;
}
@@ -689,30 +640,6 @@ out:
return 0;
}
-static inline const char *dwc3_trb_type_string(struct dwc3_trb *trb)
-{
- switch (DWC3_TRBCTL_TYPE(trb->ctrl)) {
- case DWC3_TRBCTL_NORMAL:
- return "normal";
- case DWC3_TRBCTL_CONTROL_SETUP:
- return "control-setup";
- case DWC3_TRBCTL_CONTROL_STATUS2:
- return "control-status2";
- case DWC3_TRBCTL_CONTROL_STATUS3:
- return "control-status3";
- case DWC3_TRBCTL_CONTROL_DATA:
- return "control-data";
- case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
- return "isoc-first";
- case DWC3_TRBCTL_ISOCHRONOUS:
- return "isoc";
- case DWC3_TRBCTL_LINK_TRB:
- return "link";
- default:
- return "UNKNOWN";
- }
-}
-
static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
{
struct dwc3_ep *dep = s->private;
@@ -733,10 +660,11 @@ static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
for (i = 0; i < DWC3_TRB_NUM; i++) {
struct dwc3_trb *trb = &dep->trb_pool[i];
+ unsigned int type = DWC3_TRBCTL_TYPE(trb->ctrl);
seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d\n",
trb->bph, trb->bpl, trb->size,
- dwc3_trb_type_string(trb),
+ dwc3_trb_type_string(type),
!!(trb->ctrl & DWC3_TRB_CTRL_IOC),
!!(trb->ctrl & DWC3_TRB_CTRL_ISP_IMI),
!!(trb->ctrl & DWC3_TRB_CTRL_CSP),
@@ -822,19 +750,8 @@ static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
{
int i;
- for (i = 0; i < dwc->num_in_eps; i++) {
- u8 epnum = (i << 1) | 1;
- struct dwc3_ep *dep = dwc->eps[epnum];
-
- if (!dep)
- continue;
-
- dwc3_debugfs_create_endpoint_dir(dep, parent);
- }
-
- for (i = 0; i < dwc->num_out_eps; i++) {
- u8 epnum = (i << 1);
- struct dwc3_ep *dep = dwc->eps[epnum];
+ for (i = 0; i < dwc->num_eps; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
if (!dep)
continue;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
new file mode 100644
index 000000000000..2765c51c7ef5
--- /dev/null
+++ b/drivers/usb/dwc3/drd.c
@@ -0,0 +1,85 @@
+/**
+ * drd.c - DesignWare USB3 DRD Controller Dual-role support
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Roger Quadros <rogerq@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/extcon.h>
+
+#include "debug.h"
+#include "core.h"
+#include "gadget.h"
+
+static void dwc3_drd_update(struct dwc3 *dwc)
+{
+ int id;
+
+ id = extcon_get_state(dwc->edev, EXTCON_USB_HOST);
+ if (id < 0)
+ id = 0;
+
+ dwc3_set_mode(dwc, id ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+}
+
+static int dwc3_drd_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);
+
+ dwc3_set_mode(dwc, event ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+
+ return NOTIFY_DONE;
+}
+
+int dwc3_drd_init(struct dwc3 *dwc)
+{
+ int ret;
+
+ if (dwc->dev->of_node) {
+ if (of_property_read_bool(dwc->dev->of_node, "extcon"))
+ dwc->edev = extcon_get_edev_by_phandle(dwc->dev, 0);
+
+ if (IS_ERR(dwc->edev))
+ return PTR_ERR(dwc->edev);
+
+ dwc->edev_nb.notifier_call = dwc3_drd_notifier;
+ ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+ if (ret < 0) {
+ dev_err(dwc->dev, "couldn't register cable notifier\n");
+ return ret;
+ }
+ }
+
+ dwc3_drd_update(dwc);
+
+ return 0;
+}
+
+void dwc3_drd_exit(struct dwc3 *dwc)
+{
+ extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ flush_work(&dwc->drd_work);
+ dwc3_gadget_exit(dwc);
+}
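
The new drd.c above only consumes extcon events: dwc3_drd_notifier() flips the controller between host and device whenever the EXTCON_USB_HOST cable state changes. As a hedged illustration (not part of this patch), an ID-pin glue driver could feed that notifier through the standard extcon provider calls; the struct, names, and GPIO handling below are hypothetical.

/* Hypothetical extcon provider sketch: reports ID-pin state so that
 * dwc3_drd_notifier() above switches between host and device mode.
 */
#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

struct usb_id_pin {				/* hypothetical driver state */
	struct extcon_dev *edev;
	struct gpio_desc *id_gpio;
};

static const unsigned int usb_id_cables[] = { EXTCON_USB_HOST, EXTCON_NONE };

static irqreturn_t usb_id_irq_thread(int irq, void *data)
{
	struct usb_id_pin *pin = data;
	bool host = !gpiod_get_value_cansleep(pin->id_gpio); /* ID low => host */

	/* fires the EXTCON_USB_HOST chain that drd.c registered on */
	extcon_set_state_sync(pin->edev, EXTCON_USB_HOST, host);
	return IRQ_HANDLED;
}

/* In the provider's probe (error handling omitted):
 *	pin->edev = devm_extcon_dev_allocate(dev, usb_id_cables);
 *	devm_extcon_dev_register(dev, pin->edev);
 */
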
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 9eba51b92f72..98f74ff66120 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
@@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
if (!exynos)
return -ENOMEM;
- /*
- * Right now device-tree probed devices don't get dma_mask set.
- * Since shared usb code relies on it, set it here for now.
- * Once we move to full device tree support this will vanish off.
- */
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, exynos);
exynos->dev = dev;
@@ -138,10 +128,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
clk_prepare_enable(exynos->clk);
exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
- if (IS_ERR(exynos->susp_clk)) {
- dev_info(dev, "no suspend clk specified\n");
+ if (IS_ERR(exynos->susp_clk))
exynos->susp_clk = NULL;
- }
clk_prepare_enable(exynos->susp_clk);
if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
@@ -159,53 +147,53 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
ret = PTR_ERR(exynos->vdd33);
- goto err2;
+ goto vdd33_err;
}
ret = regulator_enable(exynos->vdd33);
if (ret) {
dev_err(dev, "Failed to enable VDD33 supply\n");
- goto err2;
+ goto vdd33_err;
}
exynos->vdd10 = devm_regulator_get(dev, "vdd10");
if (IS_ERR(exynos->vdd10)) {
ret = PTR_ERR(exynos->vdd10);
- goto err3;
+ goto vdd10_err;
}
ret = regulator_enable(exynos->vdd10);
if (ret) {
dev_err(dev, "Failed to enable VDD10 supply\n");
- goto err3;
+ goto vdd10_err;
}
ret = dwc3_exynos_register_phys(exynos);
if (ret) {
dev_err(dev, "couldn't register PHYs\n");
- goto err4;
+ goto phys_err;
}
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err5;
+ goto populate_err;
}
} else {
dev_err(dev, "no device node, failed to add dwc3 core\n");
ret = -ENODEV;
- goto err5;
+ goto populate_err;
}
return 0;
-err5:
+populate_err:
platform_device_unregister(exynos->usb2_phy);
platform_device_unregister(exynos->usb3_phy);
-err4:
+phys_err:
regulator_disable(exynos->vdd10);
-err3:
+vdd10_err:
regulator_disable(exynos->vdd33);
-err2:
+vdd33_err:
clk_disable_unprepare(exynos->axius_clk);
axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
@@ -300,7 +288,6 @@ static struct platform_driver dwc3_exynos_driver = {
module_platform_driver(dwc3_exynos_driver);
-MODULE_ALIAS("platform:exynos-dwc3");
MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-hi3660.c b/drivers/usb/dwc3/dwc3-hi3660.c
new file mode 100644
index 000000000000..d8cdc0f7280b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-hi3660.c
@@ -0,0 +1,310 @@
+/*
+ * dwc3-hi3660.c
+ *
+ * Copyright: (C) 2008-2018 hisilicon.
+ * Contact: wangbinghui<wangbinghui@hisilicon.com>
+ *
+ * USB vbus for Hisilicon device
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include "dwc3-hisi.h"
+
+/*lint -e750 -esym(750,*)*/
+/* clk module will round to 228M */
+#define USB3OTG_ACLK_FREQ 229000000
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+#define SCTRL_SCDEEPSLEEPED 0x08
+#define USB_REFCLK_ISO_EN BIT(25)
+#define PCTRL_PERI_CTRL3 0x10
+#define USB_TCXO_EN BIT(1)
+#define PERI_CTRL3_MSK_START (16)
+#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
+
+#define SC_SEL_ABB_BACKUP BIT(8)
+#define CLKDIV_MASK_START (16)
+
+#define PERI_CRG_CLKDIV21 0xFC
+
+#define GT_CLK_ABB_BACKUP BIT(22)
+#define PERI_CRG_CLK_DIS5 0x54
+
+#define PMC_PPLL3CTRL0 0x048
+#define PPLL3_FBDIV_START (8)
+#define PPLL3_EN BIT(0)
+#define PPLL3_BP BIT(1)
+#define PPLL3_LOCK BIT(26)
+
+#define PMC_PPLL3CTRL1 0x04C
+#define PPLL3_INT_MOD BIT(24)
+#define GT_CLK_PPLL3 BIT(26)
+
+#define PERI_CRG_CLK_EN5 0x50
+
+#define SC_USB3PHY_ABB_GT_EN BIT(15)
+#define REF_SSP_EN BIT(16)
+/*lint -e750 +esym(750,*)*/
+
+static int usb3_regu_init(struct hisi_dwc3_device *hisi_dwc3)
+{
+ if (hisi_dwc3->is_regu_on != 0) {
+ usb_dbg("ldo already opened!\n");
+ return 0;
+ }
+
+ hisi_dwc3->is_regu_on = 1;
+
+ return 0;
+}
+
+static int usb3_regu_shutdown(struct hisi_dwc3_device *hisi_dwc3)
+{
+ if (hisi_dwc3->is_regu_on == 0) {
+ usb_dbg("regu already closed!\n");
+ return 0;
+ }
+
+ hisi_dwc3->is_regu_on = 0;
+
+ return 0;
+}
+
+static int usb3_clk_init(struct hisi_dwc3_device *hisi_dwc3)
+{
+ int ret;
+ u32 temp;
+ void __iomem *pctrl_base = hisi_dwc3->pctrl_reg_base;
+ void __iomem *pericfg_base = hisi_dwc3->pericfg_reg_base;
+
+ /* raise usb aclk to USB3OTG_ACLK_FREQ to improve performance */
+ ret = clk_set_rate(hisi_dwc3->gt_aclk_usb3otg, USB3OTG_ACLK_FREQ);
+ if (ret)
+ usb_err("usb aclk set rate failed\n");
+
+ ret = clk_prepare_enable(hisi_dwc3->gt_aclk_usb3otg);
+ if (ret) {
+ usb_err("clk_prepare_enable gt_aclk_usb3otg failed\n");
+ return ret;
+ }
+
+ /* usb refclk iso enable */
+ writel(USB_REFCLK_ISO_EN, pericfg_base + PERI_CRG_ISODIS);
+
+ /* enable usb_tcxo_en */
+ writel(USB_TCXO_EN | (USB_TCXO_EN << PERI_CTRL3_MSK_START),
+ pctrl_base + PCTRL_PERI_CTRL3);
+
+ /* select usbphy clk from abb */
+ temp = readl(pctrl_base + PCTRL_PERI_CTRL24);
+ temp &= ~SC_CLK_USB3PHY_3MUX1_SEL;
+ writel(temp, pctrl_base + PCTRL_PERI_CTRL24);
+
+ /* open clk gate */
+ writel(GT_CLK_USB3OTG_REF | GT_ACLK_USB3OTG,
+ pericfg_base + PERI_CRG_CLK_EN4);
+
+ ret = clk_prepare_enable(hisi_dwc3->clk);
+ if (ret) {
+ usb_err("clk_prepare_enable clk failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void usb3_clk_shutdown(struct hisi_dwc3_device *hisi_dwc3)
+{
+ u32 temp;
+ void __iomem *pctrl_base = hisi_dwc3->pctrl_reg_base;
+ void __iomem *pericfg_base = hisi_dwc3->pericfg_reg_base;
+
+ writel(GT_CLK_USB3OTG_REF | GT_ACLK_USB3OTG,
+ pericfg_base + PERI_CRG_CLK_DIS4);
+
+ temp = readl(pctrl_base + PCTRL_PERI_CTRL24);
+ temp &= ~SC_CLK_USB3PHY_3MUX1_SEL;
+ writel(temp, pctrl_base + PCTRL_PERI_CTRL24);
+
+ /* disable usb_tcxo_en */
+ writel(0 | (USB_TCXO_EN << PERI_CTRL3_MSK_START),
+ pctrl_base + PCTRL_PERI_CTRL3);
+
+ clk_disable_unprepare(hisi_dwc3->clk);
+ clk_disable_unprepare(hisi_dwc3->gt_aclk_usb3otg);
+
+ msleep(20);
+}
+
+static void dwc3_release(struct hisi_dwc3_device *hisi_dwc3)
+{
+ u32 temp;
+ void __iomem *pericfg_base = hisi_dwc3->pericfg_reg_base;
+ void __iomem *otg_bc_base = hisi_dwc3->otg_bc_reg_base;
+
+ /* dis-reset the module */
+ writel(IP_RST_USB3OTG_MUX | IP_RST_USB3OTG_AHBIF | IP_RST_USB3OTG_32K,
+ pericfg_base + PERI_CRG_RSTDIS4);
+
+ /* reset phy */
+ writel(IP_RST_USB3OTGPHY_POR | IP_RST_USB3OTG,
+ pericfg_base + PERI_CRG_RSTEN4);
+
+ /* enable phy ref clk */
+ temp = readl(otg_bc_base + USBOTG3_CTRL0);
+ temp |= SC_USB3PHY_ABB_GT_EN;
+ writel(temp, otg_bc_base + USBOTG3_CTRL0);
+
+ temp = readl(otg_bc_base + USBOTG3_CTRL7);
+ temp |= REF_SSP_EN;
+ writel(temp, otg_bc_base + USBOTG3_CTRL7);
+
+ /* exit from IDDQ mode */
+ temp = readl(otg_bc_base + USBOTG3_CTRL2);
+ temp &= ~(USBOTG3CTRL2_POWERDOWN_HSP | USBOTG3CTRL2_POWERDOWN_SSP);
+ writel(temp, otg_bc_base + USBOTG3_CTRL2);
+
+ usleep_range(100, 120);
+
+ /* dis-reset phy */
+ writel(IP_RST_USB3OTGPHY_POR, pericfg_base + PERI_CRG_RSTDIS4);
+
+ /* dis-reset controller */
+ writel(IP_RST_USB3OTG, pericfg_base + PERI_CRG_RSTDIS4);
+
+ msleep(20);
+
+ /* fake vbus valid signal */
+ temp = readl(otg_bc_base + USBOTG3_CTRL3);
+ temp |= (USBOTG3_CTRL3_VBUSVLDEXT | USBOTG3_CTRL3_VBUSVLDEXTSEL);
+ writel(temp, otg_bc_base + USBOTG3_CTRL3);
+
+ usleep_range(100, 120);
+}
+
+static void dwc3_reset(struct hisi_dwc3_device *hisi_dwc3)
+{
+ void __iomem *pericfg_base = hisi_dwc3->pericfg_reg_base;
+
+ writel(IP_RST_USB3OTG, pericfg_base + PERI_CRG_RSTEN4);
+ writel(IP_RST_USB3OTGPHY_POR, pericfg_base + PERI_CRG_RSTEN4);
+ writel(IP_RST_USB3OTG_MUX | IP_RST_USB3OTG_AHBIF | IP_RST_USB3OTG_32K,
+ pericfg_base + PERI_CRG_RSTEN4);
+}
+
+static int hi3660_usb3phy_init(struct hisi_dwc3_device *hisi_dwc3)
+{
+ int ret;
+
+ usb_dbg("+\n");
+
+ ret = usb3_regu_init(hisi_dwc3);
+ if (ret)
+ return ret;
+
+ ret = usb3_clk_init(hisi_dwc3);
+ if (ret)
+ return ret;
+
+ dwc3_release(hisi_dwc3);
+ config_femtophy_param(hisi_dwc3);
+
+ set_hisi_dwc3_power_flag(1);
+
+ usb_dbg("-\n");
+
+ return 0;
+}
+
+static int hi3660_usb3phy_shutdown(struct hisi_dwc3_device *hisi_dwc3)
+{
+ int ret;
+
+ usb_dbg("+\n");
+
+ set_hisi_dwc3_power_flag(0);
+
+ dwc3_reset(hisi_dwc3);
+ usb3_clk_shutdown(hisi_dwc3);
+
+ ret = usb3_regu_shutdown(hisi_dwc3);
+ if (ret)
+ return ret;
+
+ usb_dbg("-\n");
+
+ return 0;
+}
+
+static struct usb3_phy_ops hi3660_phy_ops = {
+ .init = hi3660_usb3phy_init,
+ .shutdown = hi3660_usb3phy_shutdown,
+};
+
+static int dwc3_hi3660_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = hisi_dwc3_probe(pdev, &hi3660_phy_ops);
+ if (ret)
+ usb_err("probe failed, ret=[%d]\n", ret);
+
+ return ret;
+}
+
+static int dwc3_hi3660_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = hisi_dwc3_remove(pdev);
+ if (ret)
+ usb_err("hisi_dwc3_remove failed, ret=[%d]\n", ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id dwc3_hi3660_match[] = {
+ { .compatible = "hisilicon,hi3660-dwc3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_hi3660_match);
+#else
+#define dwc3_hi3660_match NULL
+#endif
+
+static struct platform_driver dwc3_hi3660_driver = {
+ .probe = dwc3_hi3660_probe,
+ .remove = dwc3_hi3660_remove,
+ .driver = {
+ .name = "usb3-hi3660",
+ .of_match_table = of_match_ptr(dwc3_hi3660_match),
+ .pm = HISI_DWC3_PM_OPS,
+ },
+};
+
+module_platform_driver(dwc3_hi3660_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 HI3660 Glue Layer");
+MODULE_AUTHOR("wangbinghui<wangbinghui@hisilicon.com>");
diff --git a/drivers/usb/dwc3/dwc3-hisi.c b/drivers/usb/dwc3/dwc3-hisi.c
new file mode 100644
index 000000000000..32d7edca5e7b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-hisi.c
@@ -0,0 +1,1972 @@
+/*
+ * dwc3-hisi.c
+ *
+ * Copyright: (C) 2008-2018 hisilicon.
+ * Contact: wangbinghui<wangbinghui@hisilicon.com>
+ *
+ * USB vbus for Hisilicon device
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/usb/ch9.h>
+
+#include "dwc3-hisi.h"
+#include "core.h"
+#include "dwc3-otg.h"
+
+#define ENABLE_USB_TEST_PORT
+
+#define BC_AGAIN_DELAY_TIME 8000 /* ms */
+
+struct hisi_dwc3_device *hisi_dwc3_dev;
+atomic_t hisi_dwc3_power_on = ATOMIC_INIT(0);
+
+void set_hisi_dwc3_power_flag(int val)
+{
+ unsigned long flags;
+ struct dwc3 *dwc = NULL;
+
+ if (dwc_otg_handler && dwc_otg_handler->dwc) {
+ dwc = dwc_otg_handler->dwc;
+ spin_lock_irqsave(&dwc->lock, flags);
+ usb_dbg("get dwc3 lock\n");
+ }
+
+ atomic_set(&hisi_dwc3_power_on, val);
+ usb_dbg("set hisi_dwc3_power_flag %d\n", val);
+
+ if (dwc) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ usb_dbg("put dwc3 lock\n");
+ }
+}
+
+#ifdef ENABLE_USB_TEST_PORT
+
+static ssize_t plugusb_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hisi_dwc3_device *hisi_dwc3 = platform_get_drvdata(pdev);
+ char *s;
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return scnprintf(buf, PAGE_SIZE, "hisi_dwc3 NULL\n");
+ }
+
+ switch (hisi_dwc3->state) {
+ case USB_STATE_UNKNOWN:
+ s = "USB_STATE_UNKNOWN";
+ break;
+ case USB_STATE_OFF:
+ s = "USB_STATE_OFF";
+ break;
+ case USB_STATE_DEVICE:
+ s = "USB_STATE_DEVICE";
+ break;
+ case USB_STATE_HOST:
+ s = "USB_STATE_HOST";
+ break;
+ default:
+ s = "unknown";
+ break;
+ }
+ return scnprintf(buf, PAGE_SIZE, "current state: %s\n usage: %s\n", s,
+ "echo hoston/hostoff/deviceon/deviceoff > plugusb\n");
+}
+
+static ssize_t plugusb_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ if (!strncmp(buf, "hoston", strlen("hoston")))
+ hisi_usb_otg_event(ID_FALL_EVENT);
+ else if (!strncmp(buf, "hostoff", strlen("hostoff")))
+ hisi_usb_otg_event(ID_RISE_EVENT);
+ else if (!strncmp(buf, "deviceon", strlen("deviceon")))
+ hisi_usb_otg_event(CHARGER_CONNECT_EVENT);
+ else if (!strncmp(buf, "deviceoff", strlen("deviceoff")))
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ else
+ usb_err("input state is illegal!\n");
+
+ /* added for show message of plugusb status to com port */
+ pr_err("[USB.plugusb] %s\n", buf);
+
+ return size;
+}
+
+/*lint -save -e750 */
+DEVICE_ATTR(plugusb, (0644), plugusb_show, plugusb_store);
+/*lint -restore */
+
+static const char * const charger_type_array[] = {
+ [CHARGER_TYPE_SDP] = "sdp", /* Standard Downstream Port */
+ [CHARGER_TYPE_CDP] = "cdp", /* Charging Downstream Port */
+ [CHARGER_TYPE_DCP] = "dcp", /* Dedicated Charging Port */
+ [CHARGER_TYPE_UNKNOWN] = "unknown", /* non-standard */
+ [CHARGER_TYPE_NONE] = "none", /* not connected */
+ [PLEASE_PROVIDE_POWER] = "provide" /* host mode, provide power */
+};
+
+static enum hisi_charger_type get_charger_type_from_str(const char *buf,
+ size_t size)
+{
+ int i = 0;
+ enum hisi_charger_type ret = CHARGER_TYPE_NONE;
+
+ for (i = 0; i < sizeof(charger_type_array) /
+ sizeof(charger_type_array[0]); i++) {
+ if (!strncmp(buf, charger_type_array[i], size - 1)) {
+ ret = (enum hisi_charger_type)i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+ssize_t hiusb_do_charger_show(void *dev_data, char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+ enum hisi_charger_type charger_type = CHARGER_TYPE_NONE;
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return scnprintf(buf, size,
+ "platform_get_drvdata return null\n");
+ }
+
+ mutex_lock(&hisi_dwc->lock);
+ charger_type = hisi_dwc->charger_type;
+ mutex_unlock(&hisi_dwc->lock);
+
+ return scnprintf(buf, size, "[(%d):Charger type = %s]\n"
+ "----------------------------------------------------------------\n"
+ "usage: echo {str} > chargertest\n"
+ " sdp: Standard Downstream Port\n"
+ " cdp: Charging Downstream Port\n"
+ " dcp: Dedicated Charging Port\n"
+ " unknown: non-standard\n"
+ " none: not connected\n"
+ " provide: host mode, provide power\n"
+ , charger_type, charger_type_array[charger_type]);
+}
+
+int hiusb_get_eyepattern_param(void *dev_data, char *buf, size_t len)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+ int ret = 0;
+
+ if (hisi_dwc) {
+ ret = scnprintf(buf, len, "device:0x%x\nhost:0x%x\n",
+ hisi_dwc->eye_diagram_param,
+ hisi_dwc->eye_diagram_host_param);
+ } else {
+ usb_err("hisi_dwc NULL\n");
+ ret = scnprintf(buf, len, "hisi_dwc NULL\n");
+ }
+
+ return ret;
+}
+
+int hiusb_set_eyepattern_param(void *dev_data, const char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+ int eye_diagram_param;
+
+ if (!hisi_dwc) {
+ pr_err("seteye: hisi_dwc is null\n");
+ return size;
+ }
+
+ if (sscanf(buf, "%32x", &eye_diagram_param) != 1)
+ return size;
+
+ hisi_dwc->eye_diagram_param = eye_diagram_param;
+ hisi_dwc->eye_diagram_host_param = eye_diagram_param;
+
+ return size;
+}
+
+static void notify_charger_type(struct hisi_dwc3_device *hisi_dwc3);
+ssize_t hiusb_do_charger_store(void *dev_data, const char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+ enum hisi_charger_type charger_type =
+ get_charger_type_from_str(buf, size);
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return size;
+ }
+
+ mutex_lock(&hisi_dwc->lock);
+ hisi_dwc->charger_type = charger_type;
+ notify_charger_type(hisi_dwc);
+ mutex_unlock(&hisi_dwc->lock);
+
+ return size;
+}
+
+#ifdef CONFIG_HISI_DEBUG_FS
+static ssize_t fakecharger_show(void *dev_data, char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return scnprintf(buf, size,
+ "platform_get_drvdata return null\n");
+ }
+
+ return scnprintf(buf, size, "[fake charger type: %s]\n",
+ hisi_dwc->fake_charger_type == CHARGER_TYPE_NONE ?
+ "not fake" :
+ charger_type_array[hisi_dwc->fake_charger_type]);
+}
+
+static ssize_t fakecharger_store(void *dev_data, const char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+ enum hisi_charger_type charger_type =
+ get_charger_type_from_str(buf, size);
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return size;
+ }
+
+ mutex_lock(&hisi_dwc->lock);
+ hisi_dwc->fake_charger_type = charger_type;
+ mutex_unlock(&hisi_dwc->lock);
+
+ return size;
+}
+#endif
+ssize_t hiusb_do_eventmask_show(void *dev_data, char *buf, size_t size)
+{
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return scnprintf(buf, size,
+ "platform_get_drvdata return null\n");
+ }
+
+ return scnprintf(buf, size, "%d\n", hisi_dwc->eventmask);
+}
+
+ssize_t hiusb_do_eventmask_store(void *dev_data, const char *buf, size_t size)
+{
+ int eventmask;
+ struct hisi_dwc3_device *hisi_dwc = (struct hisi_dwc3_device *)dev_data;
+
+ if (!hisi_dwc) {
+ pr_err("platform_get_drvdata return null\n");
+ return size;
+ }
+
+ if (sscanf(buf, "%1d", &eventmask) != 1)
+ return size;
+
+ hisi_dwc->eventmask = eventmask;
+
+ return size;
+}
+
+static struct device_attribute *hisi_dwc3_attributes[] = {
+ &dev_attr_plugusb,
+ NULL
+};
+
+static int create_attr_file(struct device *dev)
+{
+ struct device_attribute **attrs = hisi_dwc3_attributes;
+ struct device_attribute *attr;
+ struct class *hisi_usb_class;
+ struct device *hisi_usb_dev;
+ int i;
+ int ret = 0;
+
+ usb_dbg("+\n");
+ for (i = 0; attrs[i]; i++) {
+ attr = attrs[i];
+ ret = device_create_file(dev, attr);
+ if (ret) {
+ dev_err(dev, "create attr file error!\n");
+ goto err;
+ }
+ }
+
+ hisi_usb_class = class_create(THIS_MODULE, "hisi_usb_class");
+ if (IS_ERR(hisi_usb_class)) {
+ usb_dbg("create hisi_usb_class error!\n");
+ } else {
+ hisi_usb_dev = device_create(hisi_usb_class, NULL, 0,
+ NULL, "hisi_usb_dev");
+ if (IS_ERR(hisi_usb_dev))
+ usb_dbg("create hisi_usb_dev error!\n");
+ else
+ ret |= sysfs_create_link(&hisi_usb_dev->kobj,
+ &dev->kobj, "interface");
+ }
+ if (ret)
+ usb_dbg("create attr file error!\n");
+
+#ifdef CONFIG_HISI_DEBUG_FS
+ hiusb_debug_quick_register(
+ platform_get_drvdata(to_platform_device(dev)),
+ (hiusb_debug_show_ops)fakecharger_show,
+ (hiusb_debug_store_ops)fakecharger_store);
+ hiusb_debug_init(platform_get_drvdata(to_platform_device(dev)));
+#endif
+
+ usb_dbg("-\n");
+ return 0;
+
+err:
+ for (i-- ; i >= 0; i--) {
+ attr = attrs[i];
+ device_remove_file(dev, attr);
+ }
+
+ return ret;
+}
+
+static void remove_attr_file(struct device *dev)
+{
+ struct device_attribute **attrs = hisi_dwc3_attributes;
+ struct device_attribute *attr;
+
+ while ((attr = *attrs++))
+ device_remove_file(dev, attr);
+}
+#else
+static inline int create_attr_file(struct device *dev)
+{
+ return 0;
+}
+
+static inline void remove_attr_file(struct device *dev) {}
+#endif
+
+static void phy_cr_wait_ack(void __iomem *otg_bc_base)
+{
+ int i = 1000;
+
+ while (1) {
+ if ((readl(otg_bc_base + USB3PHY_CR_STS) &
+ USB3OTG_PHY_CR_ACK) == 1)
+ break;
+ usleep_range(50, 60);
+ if (i-- < 0) {
+ usb_err("wait phy_cr_ack timeout!\n");
+ break;
+ }
+ }
+}
+
+static void phy_cr_set_addr(void __iomem *otg_bc_base, u32 addr)
+{
+ u32 reg;
+
+ /* set addr */
+ reg = USB3OTG_PHY_CR_DATA_IN(addr);
+ writel(reg, otg_bc_base + USB3PHY_CR_CTRL);
+
+ usleep_range(100, 120);
+
+ /* cap addr */
+ reg = readl(otg_bc_base + USB3PHY_CR_CTRL);
+ reg |= USB3OTG_PHY_CR_CAP_ADDR;
+ writel(reg, otg_bc_base + USB3PHY_CR_CTRL);
+
+ phy_cr_wait_ack(otg_bc_base);
+
+ /* clear ctrl reg */
+ writel(0, otg_bc_base + USB3PHY_CR_CTRL);
+}
+
+static u16 phy_cr_read(void __iomem *otg_bc_base, u32 addr)
+{
+ u32 reg;
+ int i = 1000;
+
+ phy_cr_set_addr(otg_bc_base, addr);
+
+ /* read cap */
+ writel(USB3OTG_PHY_CR_READ, otg_bc_base + USB3PHY_CR_CTRL);
+
+ usleep_range(100, 120);
+
+ while (1) {
+ reg = readl(otg_bc_base + USB3PHY_CR_STS);
+ if ((reg & USB3OTG_PHY_CR_ACK) == 1)
+ break;
+ usleep_range(50, 60);
+ if (i-- < 0) {
+ usb_err("wait phy_cr_ack timeout!\n");
+ break;
+ }
+ }
+
+ /* clear ctrl reg */
+ writel(0, otg_bc_base + USB3PHY_CR_CTRL);
+
+ return (u16)USB3OTG_PHY_CR_DATA_OUT(reg);
+}
+
+static void phy_cr_write(void __iomem *otg_bc_base, u32 addr, u32 value)
+{
+ u32 reg;
+
+ phy_cr_set_addr(otg_bc_base, addr);
+
+ reg = USB3OTG_PHY_CR_DATA_IN(value);
+ writel(reg, otg_bc_base + USB3PHY_CR_CTRL);
+
+ /* cap data */
+ reg = readl(otg_bc_base + USB3PHY_CR_CTRL);
+ reg |= USB3OTG_PHY_CR_CAP_DATA;
+ writel(reg, otg_bc_base + USB3PHY_CR_CTRL);
+
+ /* wait ack */
+ phy_cr_wait_ack(otg_bc_base);
+
+ /* clear ctrl reg */
+ writel(0, otg_bc_base + USB3PHY_CR_CTRL);
+
+ reg = USB3OTG_PHY_CR_WRITE;
+ writel(reg, otg_bc_base + USB3PHY_CR_CTRL);
+
+ /* wait ack */
+ phy_cr_wait_ack(otg_bc_base);
+}
+
+void set_usb3_phy_cr_param(u32 addr, u32 value)
+{
+ if (!hisi_dwc3_dev) {
+ pr_err("hisi dwc3 device not ready!\n");
+ return;
+ }
+
+ phy_cr_write(hisi_dwc3_dev->otg_bc_reg_base, addr, value);
+}
+EXPORT_SYMBOL_GPL(set_usb3_phy_cr_param);
+
+void read_usb3_phy_cr_param(u32 addr)
+{
+ if (!hisi_dwc3_dev) {
+ pr_err("hisi dwc3 device not ready!\n");
+ return;
+ }
+
+ usb_dbg("read usb3 phy cr param 0x%x\n",
+ phy_cr_read(hisi_dwc3_dev->otg_bc_reg_base, addr));
+}
+EXPORT_SYMBOL_GPL(read_usb3_phy_cr_param);
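
set_usb3_phy_cr_param() and read_usb3_phy_cr_param() are exported so other code can touch the SuperSpeed PHY control registers once this glue has probed. A minimal sketch, assuming DWC3_PHY_RX_OVRD_IN_HI comes from dwc3-hisi.h and using a placeholder value with the same shape as the default usb3_phy_cr_param picked in get_phy_param() below:

/* Illustrative only: dump the current RX override setting, then program a
 * replacement.  The value is a placeholder, not a tuned equalization setting.
 */
static void example_tune_rx_eq(void)
{
	/* logs the current DWC3_PHY_RX_OVRD_IN_HI contents via usb_dbg() */
	read_usb3_phy_cr_param(DWC3_PHY_RX_OVRD_IN_HI);

	/* same bit layout as the default usb3_phy_cr_param in get_phy_param() */
	set_usb3_phy_cr_param(DWC3_PHY_RX_OVRD_IN_HI,
			      (1 << 11) | (3 << 8) | (1 << 7));
}
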
+
+void config_femtophy_param(struct hisi_dwc3_device *hisi_dwc)
+{
+ u32 reg;
+ void __iomem *otg_bc_base = hisi_dwc->otg_bc_reg_base;
+
+ if (hisi_dwc->fpga_flag != 0)
+ return;
+
+ /* set high speed phy parameter */
+ if (hisi_dwc->host_flag) {
+ writel(hisi_dwc->eye_diagram_host_param,
+ otg_bc_base + USBOTG3_CTRL4);
+ usb_dbg("set hs phy param 0x%x for host\n",
+ readl(otg_bc_base + USBOTG3_CTRL4));
+ } else {
+ writel(hisi_dwc->eye_diagram_param,
+ otg_bc_base + USBOTG3_CTRL4);
+ usb_dbg("set hs phy param 0x%x for device\n",
+ readl(otg_bc_base + USBOTG3_CTRL4));
+ }
+
+ /* set usb3 phy cr config for usb3.0 */
+
+ if (hisi_dwc->host_flag) {
+ phy_cr_write(otg_bc_base, DWC3_PHY_RX_OVRD_IN_HI,
+ hisi_dwc->usb3_phy_host_cr_param);
+ } else {
+ phy_cr_write(otg_bc_base, DWC3_PHY_RX_OVRD_IN_HI,
+ hisi_dwc->usb3_phy_cr_param);
+ }
+
+ usb_dbg("set ss phy rx equalization 0x%x\n",
+ phy_cr_read(otg_bc_base, DWC3_PHY_RX_OVRD_IN_HI));
+
+ /* enable RX_SCOPE_LFPS_EN for usb3.0 */
+ reg = phy_cr_read(otg_bc_base, DWC3_PHY_RX_SCOPE_VDCC);
+ reg |= RX_SCOPE_LFPS_EN;
+ phy_cr_write(otg_bc_base, DWC3_PHY_RX_SCOPE_VDCC, reg);
+
+ usb_dbg("set ss RX_SCOPE_VDCC 0x%x\n",
+ phy_cr_read(otg_bc_base, DWC3_PHY_RX_SCOPE_VDCC));
+
+ reg = readl(otg_bc_base + USBOTG3_CTRL6);
+ reg &= ~TX_VBOOST_LVL_MASK;
+ reg |= TX_VBOOST_LVL(hisi_dwc->usb3_phy_tx_vboost_lvl);
+ writel(reg, otg_bc_base + USBOTG3_CTRL6);
+ usb_dbg("set ss phy tx vboost lvl 0x%x\n",
+ readl(otg_bc_base + USBOTG3_CTRL6));
+}
+
+int hisi_charger_type_notifier_register(struct notifier_block *nb)
+{
+ if (!hisi_dwc3_dev) {
+ pr_err("hisi dwc3 device not ready!\n");
+ return -EBUSY;
+ }
+ if (!nb)
+ return -EINVAL;
+ return atomic_notifier_chain_register(
+ &hisi_dwc3_dev->charger_type_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(hisi_charger_type_notifier_register);
+
+int hisi_charger_type_notifier_unregister(struct notifier_block *nb)
+{
+ if (!hisi_dwc3_dev) {
+ pr_err("hisi dwc3 device not ready!\n");
+ return -EBUSY;
+ }
+ if (!nb)
+ return -EINVAL;
+ return atomic_notifier_chain_unregister(
+ &hisi_dwc3_dev->charger_type_notifier,
+ nb);
+}
+EXPORT_SYMBOL_GPL(hisi_charger_type_notifier_unregister);
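
The two exported helpers above let other drivers (a charging driver, for example) observe charger-type changes; notify_charger_type() passes the new enum hisi_charger_type value as the notifier "action". A minimal hypothetical consumer, with the callback and variable names invented for illustration:

/* Hypothetical consumer sketch: logs every charger-type change reported
 * through the atomic notifier chain registered above.
 */
static int example_charger_notifier_cb(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	/* 'action' carries the new enum hisi_charger_type value */
	pr_info("charger type changed to %lu\n", action);
	return NOTIFY_OK;
}

static struct notifier_block example_charger_nb = {
	.notifier_call = example_charger_notifier_cb,
};

/* registration, typically from the consumer's probe():
 *	hisi_charger_type_notifier_register(&example_charger_nb);
 * with the matching unregister call on remove.
 */
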
+
+/* BC1.2 Spec:
+ * If a PD detects that D+ is greater than VDAT_REF, it knows that it is
+ * attached to a DCP. It is then required to enable VDP_SRC or pull D+
+ * to VDP_UP through RDP_UP
+ */
+static void disable_vdp_src(struct hisi_dwc3_device *hisi_dwc3)
+{
+ void __iomem *base = hisi_dwc3->otg_bc_reg_base;
+ u32 reg;
+
+ usb_dbg("disable VDP_SRC\n");
+
+ reg = readl(base + BC_CTRL2);
+ reg &= ~(BC_CTRL2_BC_PHY_VDATARCENB | BC_CTRL2_BC_PHY_VDATDETENB);
+ writel(reg, base + BC_CTRL2);
+
+ reg = readl(base + BC_CTRL0);
+ reg |= BC_CTRL0_BC_SUSPEND_N;
+ writel(reg, base + BC_CTRL0);
+
+ writel((readl(base + BC_CTRL1) & ~BC_CTRL1_BC_MODE), base + BC_CTRL1);
+}
+
+static void enable_vdp_src(struct hisi_dwc3_device *hisi_dwc3)
+{
+ void __iomem *base = hisi_dwc3->otg_bc_reg_base;
+ u32 reg;
+
+ reg = readl(base + BC_CTRL2);
+ reg &= ~BC_CTRL2_BC_PHY_CHRGSEL;
+ reg |= (BC_CTRL2_BC_PHY_VDATARCENB | BC_CTRL2_BC_PHY_VDATDETENB);
+ writel(reg, base + BC_CTRL2);
+}
+
+static enum hisi_charger_type detect_charger_type(struct hisi_dwc3_device
+ *hisi_dwc3)
+{
+ enum hisi_charger_type type = CHARGER_TYPE_NONE;
+ void __iomem *base = hisi_dwc3->otg_bc_reg_base;
+ u32 reg;
+ unsigned long jiffies_expire;
+ int i = 0;
+
+ if (hisi_dwc3->fpga_flag) {
+ usb_dbg("this is fpga platform, charger is SDP\n");
+ return CHARGER_TYPE_SDP;
+ }
+
+ if (hisi_dwc3->fake_charger_type != CHARGER_TYPE_NONE) {
+ usb_dbg("fake type: %d\n", hisi_dwc3->fake_charger_type);
+ return hisi_dwc3->fake_charger_type;
+ }
+
+ writel(BC_CTRL1_BC_MODE, base + BC_CTRL1);
+
+ /* phy suspend */
+ reg = readl(base + BC_CTRL0);
+ reg &= ~BC_CTRL0_BC_SUSPEND_N;
+ writel(reg, base + BC_CTRL0);
+
+ /* enable DCD */
+ reg = readl(base + BC_CTRL2);
+ reg |= BC_CTRL2_BC_PHY_DCDENB;
+ writel(reg, base + BC_CTRL2);
+
+ reg = readl(base + BC_CTRL0);
+ reg |= BC_CTRL0_BC_DMPULLDOWN;
+ writel(reg, base + BC_CTRL0);
+
+ jiffies_expire = jiffies + msecs_to_jiffies(900);
+ msleep(50);
+ while (1) {
+ reg = readl(base + BC_STS0);
+ if ((reg & BC_STS0_BC_PHY_FSVPLUS) == 0) {
+ i++;
+ if (i >= 10)
+ break;
+ } else {
+ i = 0;
+ }
+
+ msleep(20);
+
+ if (time_after(jiffies, jiffies_expire)) {
+ usb_dbg("DCD timeout!\n");
+ type = CHARGER_TYPE_UNKNOWN;
+ break;
+ }
+ }
+
+ reg = readl(base + BC_CTRL0);
+ reg &= ~BC_CTRL0_BC_DMPULLDOWN;
+ writel(reg, base + BC_CTRL0);
+
+ /* disable DCD */
+ reg = readl(base + BC_CTRL2);
+ reg &= ~BC_CTRL2_BC_PHY_DCDENB;
+ writel(reg, base + BC_CTRL2);
+
+ usb_dbg("DCD done\n");
+
+ if (type == CHARGER_TYPE_NONE) {
+ /* enable vdect */
+ reg = readl(base + BC_CTRL2);
+ reg &= ~BC_CTRL2_BC_PHY_CHRGSEL;
+ reg |= (BC_CTRL2_BC_PHY_VDATARCENB |
+ BC_CTRL2_BC_PHY_VDATDETENB);
+ writel(reg, base + BC_CTRL2);
+
+ msleep(20);
+
+ /* primary detection distinguishes SDP from CDP/DCP */
+ reg = readl(base + BC_STS0);
+ if ((reg & BC_STS0_BC_PHY_CHGDET) == 0)
+ type = CHARGER_TYPE_SDP;
+
+ /* disable vdect */
+ reg = readl(base + BC_CTRL2);
+ reg &= ~(BC_CTRL2_BC_PHY_VDATARCENB |
+ BC_CTRL2_BC_PHY_VDATDETENB);
+ writel(reg, base + BC_CTRL2);
+ }
+
+ usb_dbg("Primary Detection done\n");
+
+ if (type == CHARGER_TYPE_NONE) {
+ /* enable vdect */
+ reg = readl(base + BC_CTRL2);
+ reg |= (BC_CTRL2_BC_PHY_VDATARCENB | BC_CTRL2_BC_PHY_VDATDETENB
+ | BC_CTRL2_BC_PHY_CHRGSEL);
+ writel(reg, base + BC_CTRL2);
+
+ msleep(20);
+
+ /* secondary detection distinguishes CDP from DCP */
+ reg = readl(base + BC_STS0);
+ if ((reg & BC_STS0_BC_PHY_CHGDET) == 0)
+ type = CHARGER_TYPE_CDP;
+ else
+ type = CHARGER_TYPE_DCP;
+
+ /* disable vdect */
+ reg = readl(base + BC_CTRL2);
+ reg &= ~(BC_CTRL2_BC_PHY_VDATARCENB | BC_CTRL2_BC_PHY_VDATDETENB
+ | BC_CTRL2_BC_PHY_CHRGSEL);
+ writel(reg, base + BC_CTRL2);
+ }
+
+ usb_dbg("Secondary Detection done\n");
+
+ /* If a PD detects that D+ is greater than VDAT_REF, it knows that it is
+ * attached to a DCP. It is then required to enable VDP_SRC or pull D+
+ * to VDP_UP through RDP_UP
+ */
+ if (type == CHARGER_TYPE_DCP) {
+ usb_dbg("charger is DCP, enable VDP_SRC\n");
+ enable_vdp_src(hisi_dwc3);
+ } else {
+ /* bc_suspend = 1, normal mode */
+ reg = readl(base + BC_CTRL0);
+ reg |= BC_CTRL0_BC_SUSPEND_N;
+ writel(reg, base + BC_CTRL0);
+
+ msleep(20);
+
+ /* disable BC */
+ writel((readl(base + BC_CTRL1) & ~BC_CTRL1_BC_MODE),
+ base + BC_CTRL1);
+ }
+
+ usb_dbg("type: %d\n", type);
+
+ return type;
+}
+
+enum hisi_charger_type hisi_get_charger_type(void)
+{
+ if (!hisi_dwc3_dev) {
+ pr_err("[%s]hisi_dwc3 not yet probed!\n", __func__);
+ return CHARGER_TYPE_NONE;
+ }
+
+ pr_info("[%s]type: %d\n", __func__, hisi_dwc3_dev->charger_type);
+ return hisi_dwc3_dev->charger_type;
+}
+EXPORT_SYMBOL_GPL(hisi_get_charger_type);
+
+static void notify_charger_type(struct hisi_dwc3_device *hisi_dwc3)
+{
+ atomic_notifier_call_chain(&hisi_dwc3->charger_type_notifier,
+ hisi_dwc3->charger_type, hisi_dwc3);
+}
+
+static void set_vbus_power(struct hisi_dwc3_device *hisi_dwc3,
+ unsigned int is_on)
+{
+ enum hisi_charger_type new;
+
+ if (is_on == 0)
+ new = CHARGER_TYPE_NONE;
+ else
+ new = PLEASE_PROVIDE_POWER;
+ if (hisi_dwc3->charger_type != new) {
+ usb_dbg("set port power %d\n", is_on);
+ hisi_dwc3->charger_type = new;
+ notify_charger_type(hisi_dwc3);
+ }
+}
+
+static void hisi_dwc3_wake_lock(struct hisi_dwc3_device *hisi_dwc3)
+{
+ if (!(hisi_dwc3->ws.active)) {
+ usb_dbg("usb otg wake lock\n");
+ __pm_stay_awake(&hisi_dwc3->ws);
+ }
+}
+
+static void hisi_dwc3_wake_unlock(struct hisi_dwc3_device *hisi_dwc3)
+{
+ if (hisi_dwc3->ws.active) {
+ usb_dbg("usb otg wake unlock\n");
+ __pm_relax(&hisi_dwc3->ws);
+ }
+}
+
+static inline bool enumerate_allowed(struct hisi_dwc3_device *hisi_dwc)
+{
+ /* do not start peripheral if real charger connected */
+ return ((hisi_dwc->charger_type == CHARGER_TYPE_SDP) ||
+ (hisi_dwc->charger_type == CHARGER_TYPE_CDP) ||
+ (hisi_dwc->charger_type == CHARGER_TYPE_UNKNOWN));
+}
+
+static inline bool sleep_allowed(struct hisi_dwc3_device *hisi_dwc)
+{
+ return ((hisi_dwc->charger_type == CHARGER_TYPE_DCP) ||
+ (hisi_dwc->charger_type == CHARGER_TYPE_UNKNOWN));
+}
+
+/*
+ * create event queue
+ * event_queue: event queue handle
+ * count: maximum number of events the queue can hold
+ */
+int event_queue_creat(struct hiusb_event_queue *event_queue, unsigned int count)
+{
+ if (!event_queue) {
+ pr_err(" %s bad argument (0x%p)\n",
+ __func__, event_queue);
+ return -EINVAL;
+ }
+
+ count = (count >= MAX_EVENT_COUNT ? MAX_EVENT_COUNT : count);
+ event_queue->max_event = count;
+ event_queue->num_event = (count >= EVENT_QUEUE_UNIT ?
+ EVENT_QUEUE_UNIT : count);
+
+ event_queue->event = kzalloc(
+ (event_queue->num_event *
+ sizeof(enum otg_dev_event_type)), GFP_KERNEL);
+ if (!event_queue->event) {
+ pr_err(" %s :Can't alloc space:%d!\n",
+ __func__, event_queue->num_event);
+ return -ENOMEM;
+ }
+
+ event_queue->enpos = 0;
+ event_queue->depos = 0;
+ event_queue->overlay = 0;
+ event_queue->overlay_index = 0;
+
+ return 0;
+}
+
+void event_queue_destroy(struct hiusb_event_queue *event_queue)
+{
+ if (!event_queue)
+ return;
+
+ kfree(event_queue->event);
+ event_queue->event = NULL;
+ event_queue->enpos = 0;
+ event_queue->depos = 0;
+ event_queue->num_event = 0;
+ event_queue->max_event = 0;
+ event_queue->overlay = 0;
+ event_queue->overlay_index = 0;
+}
+
+/*
+ * check if the queue is full
+ * return true if the queue is full, false otherwise.
+ */
+int event_queue_isfull(struct hiusb_event_queue *event_queue)
+{
+ if (!event_queue)
+ return -EINVAL;
+
+ return (((event_queue->enpos + 1) % event_queue->num_event) ==
+ (event_queue->depos));
+}
+
+/*
+ * check if the queue is empty
+ * return true if the queue is empty, false otherwise.
+ */
+int event_queue_isempty(struct hiusb_event_queue *event_queue)
+{
+ if (!event_queue)
+ return -EINVAL;
+
+ return (event_queue->enpos == event_queue->depos);
+}
+
+static inline void event_queue_set_overlay(
+ struct hiusb_event_queue *event_queue)
+{
+ if (event_queue->overlay)
+ return;
+ event_queue->overlay = 1;
+ event_queue->overlay_index = event_queue->enpos;
+}
+
+static inline void event_queue_clear_overlay(
+ struct hiusb_event_queue *event_queue)
+{
+ event_queue->overlay = 0;
+ event_queue->overlay_index = 0;
+}
+
+/*
+ * enqueue a new event
+ * if the event_queue is full, return -ENOSPC
+ */
+int event_enqueue(struct hiusb_event_queue *event_queue,
+ enum otg_dev_event_type event)
+{
+ /* no need verify argument, isfull will check it */
+ if (event_queue_isfull(event_queue)) {
+ pr_err("event queue full!\n");
+ return -ENOSPC;
+ }
+
+ if (event_queue->overlay) {
+ if (event_queue->overlay_index == event_queue->enpos) {
+ event_queue->enpos = ((event_queue->enpos + 1) %
+ event_queue->num_event);
+ }
+
+ if (event_queue_isempty(event_queue)) {
+ pr_err("overlay and queue isempty? just enqueue!\n");
+ event_queue->overlay_index = (
+ (event_queue->overlay_index + 1) %
+ event_queue->num_event);
+ event_queue->enpos = ((event_queue->enpos + 1) %
+ event_queue->num_event);
+ event_queue->overlay = 0;
+ }
+
+ event_queue->event[event_queue->overlay_index] = event;
+ } else {
+ event_queue->event[event_queue->enpos] = event;
+ event_queue->enpos = ((event_queue->enpos + 1) %
+ event_queue->num_event);
+ }
+
+ return 0;
+}
+
+/*
+ * get event from event_queue
+ * this function never fails
+ * if the event_queue is empty, return NONE_EVENT
+ */
+enum otg_dev_event_type event_dequeue(struct hiusb_event_queue *event_queue)
+{
+ enum otg_dev_event_type event;
+
+ /* no need verify argument, isempty will check it */
+ if (event_queue_isempty(event_queue))
+ return NONE_EVENT;
+
+ event = event_queue->event[event_queue->depos];
+ event_queue->depos = ((event_queue->depos + 1) %
+ event_queue->num_event);
+
+ return event;
+}
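
The helpers above implement a small ring buffer of otg_dev_event_type values, plus an "overlay" slot that lets later events collapse into a single pending entry after a disconnect-style event. A minimal standalone sketch of how they fit together (the local queue and the events are illustrative; in this driver the queue lives in struct hisi_dwc3_device, enqueue runs under event_lock in hisi_usb_otg_event(), and dequeue runs in event_work()):

/* Illustrative only: exercise the queue helpers above in isolation. */
static void example_event_queue_usage(void)
{
	struct hiusb_event_queue q;
	enum otg_dev_event_type ev;

	if (event_queue_creat(&q, MAX_EVENT_COUNT))
		return;

	event_enqueue(&q, CHARGER_CONNECT_EVENT);

	/* as in hisi_usb_otg_event(): after a disconnect-style event the
	 * overlay is armed so that further events collapse into one slot
	 */
	event_enqueue(&q, CHARGER_DISCONNECT_EVENT);
	event_queue_set_overlay(&q);

	/* drain: event_dequeue() returns NONE_EVENT once the queue is empty */
	while ((ev = event_dequeue(&q)) != NONE_EVENT)
		pr_info("dequeued event %d\n", ev);

	event_queue_clear_overlay(&q);
	event_queue_destroy(&q);
}
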
+
+static void handle_event(struct hisi_dwc3_device *hisi_dwc,
+ enum otg_dev_event_type event)
+{
+ int ret = 0;
+
+ usb_err("[handle_event] type: %d\n", event);
+ switch (event) {
+ case CHARGER_CONNECT_EVENT:
+ if (hisi_dwc->state == USB_STATE_DEVICE) {
+ usb_dbg("Already in device mode, do nothing\n");
+ } else if (hisi_dwc->state == USB_STATE_OFF) {
+ hisi_dwc->host_flag = 0;
+
+ /* due to detect charger type, must resume hisi_dwc */
+ ret = pm_runtime_get_sync(&hisi_dwc->pdev->dev);
+ if (ret < 0) {
+ usb_err("resume hisi_dwc failed (ret %d)\n",
+ ret);
+ return;
+ }
+
+ /* detect charger type */
+ hisi_dwc->charger_type = detect_charger_type(hisi_dwc);
+ notify_charger_type(hisi_dwc);
+
+ /* In some cases a DCP is wrongly detected as SDP.
+ * To cover this, schedule the bc_again delayed work to
+ * detect the charger type once more.
+ * If enumeration completes in the meantime, it really
+ * is an SDP and the work is canceled.
+ */
+ if (hisi_dwc->bc_again_flag &&
+ (hisi_dwc->charger_type == CHARGER_TYPE_SDP)) {
+ ret = queue_delayed_work(
+ system_power_efficient_wq,
+ &hisi_dwc->bc_again_work,
+ msecs_to_jiffies(BC_AGAIN_DELAY_TIME));
+ usb_dbg("schedule ret:%d, run bc_again_work %dms later\n",
+ ret, BC_AGAIN_DELAY_TIME);
+ }
+
+ /* do not start peripheral if real charger connected */
+ if (enumerate_allowed(hisi_dwc)) {
+ if (hisi_dwc->fpga_usb_mode_gpio > 0) {
+ gpio_direction_output(
+ hisi_dwc->fpga_usb_mode_gpio,
+ 0);
+ usb_dbg("switch to device mode\n");
+ }
+
+ /* start peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_VBUS_SET);
+ if (ret) {
+ pm_runtime_put(&hisi_dwc->pdev->dev);
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ usb_err("start peripheral error\n");
+ return;
+ }
+ } else {
+ usb_dbg("a real charger connected\n");
+ }
+
+ hisi_dwc->state = USB_STATE_DEVICE;
+
+ if (sleep_allowed(hisi_dwc))
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ else
+ hisi_dwc3_wake_lock(hisi_dwc);
+
+ usb_dbg("hisi usb status: OFF -> DEVICE\n");
+ } else if (hisi_dwc->state == USB_STATE_HOST) {
+ usb_dbg("Charger connect interrupt in HOST mode\n");
+ }
+
+ break;
+
+ case CHARGER_DISCONNECT_EVENT:
+ hisi_dwc->need_disable_vdp = 0;
+
+ if (hisi_dwc->state == USB_STATE_OFF) {
+ usb_dbg("Already in off mode, do nothing\n");
+ } else if (hisi_dwc->state == USB_STATE_DEVICE) {
+ if (hisi_dwc->bc_again_flag) {
+ ret = cancel_delayed_work_sync(
+ &hisi_dwc->bc_again_work);
+ usb_dbg("cancel bc_again_work sync:%d\n", ret);
+ }
+
+ /* peripheral not started, if real charger connected */
+ if (enumerate_allowed(hisi_dwc)) {
+ /* stop peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_VBUS_CLEAR);
+ if (ret) {
+ usb_err("stop peripheral error\n");
+ return;
+ }
+ } else {
+ usb_dbg("a real charger is connected\n");
+ disable_vdp_src(hisi_dwc);
+ }
+
+ /* usb cable disconnect, notify no charger */
+ hisi_dwc->charger_type = CHARGER_TYPE_NONE;
+ notify_charger_type(hisi_dwc);
+
+ hisi_dwc->state = USB_STATE_OFF;
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ pm_runtime_put(&hisi_dwc->pdev->dev);
+
+ usb_dbg("hisi usb status: DEVICE -> OFF\n");
+ } else if (hisi_dwc->state == USB_STATE_HOST) {
+ usb_dbg("Charger disconnect interrupt in HOST mode\n");
+ }
+
+ break;
+
+ case ID_FALL_EVENT:
+ if (hisi_dwc->state == USB_STATE_OFF) {
+ set_vbus_power(hisi_dwc, 1);
+
+ hisi_dwc->host_flag = 1;
+
+ if (hisi_dwc->fpga_usb_mode_gpio > 0) {
+ gpio_direction_output(
+ hisi_dwc->fpga_usb_mode_gpio,
+ 1);
+ usb_dbg("switch to host mode\n");
+ }
+
+ /* start host */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_ID_CLEAR);
+ if (ret) {
+ usb_err("start host error\n");
+ set_vbus_power(hisi_dwc, 0);
+ return;
+ }
+
+ hisi_dwc->state = USB_STATE_HOST;
+ hisi_dwc3_wake_lock(hisi_dwc);
+
+ usb_dbg("hisi usb_status: OFF -> HOST\n");
+ } else if (hisi_dwc->state == USB_STATE_DEVICE) {
+ usb_dbg("id fall interrupt in DEVICE mode\n");
+ } else if (hisi_dwc->state == USB_STATE_HOST) {
+ usb_dbg("Already in host mode, do nothing\n");
+ }
+ break;
+ case ID_RISE_EVENT:
+ if (hisi_dwc->state == USB_STATE_HOST) {
+ set_vbus_power(hisi_dwc, 0);
+
+ /* stop host */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_ID_SET);
+ if (ret) {
+ usb_err("stop host error\n");
+ return;
+ }
+
+ hisi_dwc->state = USB_STATE_OFF;
+ hisi_dwc3_wake_unlock(hisi_dwc);
+
+ usb_dbg("hiusb_status: HOST -> OFF\n");
+ } else if (hisi_dwc->state == USB_STATE_DEVICE) {
+ usb_dbg("id rise interrupt in DEVICE mode\n");
+ } else if (hisi_dwc->state == USB_STATE_OFF) {
+ usb_dbg("Already in off mode, do nothing\n");
+ }
+
+ break;
+ default:
+ usb_dbg("illegal event type!\n");
+ break;
+ }
+}
+
+static void event_work(struct work_struct *work)
+{
+ unsigned long flags;
+ enum otg_dev_event_type event;
+ struct hisi_dwc3_device *hisi_dwc = container_of(work,
+ struct hisi_dwc3_device, event_work);
+
+ mutex_lock(&hisi_dwc->lock);
+
+ usb_dbg("+\n");
+
+ while (!event_queue_isempty(&hisi_dwc->event_queue)) {
+ spin_lock_irqsave(&hisi_dwc->event_lock, flags);
+ event = event_dequeue(&hisi_dwc->event_queue);
+ spin_unlock_irqrestore(&hisi_dwc->event_lock, flags);
+
+ handle_event(hisi_dwc, event);
+ }
+
+ event_queue_clear_overlay(&hisi_dwc->event_queue);
+
+ usb_dbg("-\n");
+ mutex_unlock(&hisi_dwc->lock);
+}
+
+static int event_check(enum otg_dev_event_type last_event,
+ enum otg_dev_event_type new_event)
+{
+ int ret = 0;
+
+ if (last_event == NONE_EVENT)
+ return 1;
+
+ switch (new_event) {
+ case CHARGER_CONNECT_EVENT:
+ if ((last_event == CHARGER_DISCONNECT_EVENT) ||
+ (last_event == ID_RISE_EVENT))
+ ret = 1;
+ break;
+ case CHARGER_DISCONNECT_EVENT:
+ if (last_event == CHARGER_CONNECT_EVENT)
+ ret = 1;
+ break;
+ case ID_FALL_EVENT:
+ if ((last_event == CHARGER_DISCONNECT_EVENT) ||
+ (last_event == ID_RISE_EVENT))
+ ret = 1;
+ break;
+ case ID_RISE_EVENT:
+ if (last_event == ID_FALL_EVENT)
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int hisi_usb_otg_event(enum otg_dev_event_type event)
+{
+ int ret = 0;
+#ifdef CONFIG_USB_DWC3_OTG
+ unsigned long flags;
+ struct hisi_dwc3_device *hisi_dwc3 = hisi_dwc3_dev;
+#endif
+ usb_err("%s in:%d\n", __func__, event);
+#ifdef CONFIG_USB_DWC3_OTG
+ usb_err("%s in otg:%d\n", __func__, event);
+
+ if (!hisi_dwc3) {
+ usb_dbg(" %s error:%d\n", __func__, event);
+ return -EBUSY;
+ }
+
+ if (hisi_dwc3->eventmask) {
+ usb_dbg("eventmask enabled, mask all events.\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&hisi_dwc3->event_lock, flags);
+
+ if (event_check(hisi_dwc3->event, event)) {
+ usb_dbg("event: %d\n", event);
+ hisi_dwc3->event = event;
+
+ if ((event == CHARGER_CONNECT_EVENT) ||
+ (event == CHARGER_DISCONNECT_EVENT))
+ hisi_dwc3_wake_lock(hisi_dwc3);
+
+ if (!event_enqueue(&hisi_dwc3->event_queue, event)) {
+ ret = queue_work(system_power_efficient_wq,
+ &hisi_dwc3->event_work);
+ if (!ret)
+ usb_err("schedule event_work wait:%d\n",
+ event);
+ } else {
+ usb_err("%s can't enqueue event:%d\n",
+ __func__, event);
+ }
+
+ if ((event == ID_RISE_EVENT) ||
+ (event == CHARGER_DISCONNECT_EVENT))
+ event_queue_set_overlay(&hisi_dwc3->event_queue);
+ }
+ spin_unlock_irqrestore(&hisi_dwc3->event_lock, flags);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_usb_otg_event);
+
+static void bc_again(struct hisi_dwc3_device *hisi_dwc)
+{
+ int ret;
+
+ /*
+ * STEP 1
+ */
+ /* stop peripheral which is started when detected as SDP before */
+ if (enumerate_allowed(hisi_dwc)) {
+ ret = dwc3_otg_work(dwc_otg_handler, DWC3_OTG_EVT_VBUS_CLEAR);
+ if (ret) {
+ usb_err("stop peripheral error\n");
+ return;
+ }
+ }
+
+ /*
+ * STEP 2
+ */
+ hisi_dwc->charger_type = detect_charger_type(hisi_dwc);
+ notify_charger_type(hisi_dwc);
+
+ /*
+ * STEP 3
+ */
+ /* must recheck enumerate_allowed, because charger_type may have
+ * changed, and enumerate_allowed depends on charger_type
+ */
+ if (enumerate_allowed(hisi_dwc)) {
+ /* start peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_VBUS_SET);
+ if (ret) {
+ pm_runtime_put(&hisi_dwc->pdev->dev);
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ usb_err("start peripheral error\n");
+ return;
+ }
+ } else {
+ usb_dbg("a real charger connected\n");
+ }
+}
+
+void hisi_usb_otg_bc_again(void)
+{
+ struct hisi_dwc3_device *hisi_dwc = hisi_dwc3_dev;
+
+ usb_dbg("+\n");
+
+ if (!hisi_dwc) {
+ usb_err("No usb module, can't call bc again api\n");
+ return;
+ }
+
+ mutex_lock(&hisi_dwc->lock);
+
+ /* re-run detection only if the charger type is still UNKNOWN */
+ if (hisi_dwc->charger_type == CHARGER_TYPE_UNKNOWN) {
+ usb_dbg("charger_type is UNKNOWN, start bc_again_work\n");
+ bc_again(hisi_dwc);
+ }
+
+ mutex_unlock(&hisi_dwc->lock);
+ usb_dbg("-\n");
+}
+EXPORT_SYMBOL_GPL(hisi_usb_otg_bc_again);
+
+static void bc_again_work(struct work_struct *work)
+{
+ struct hisi_dwc3_device *hisi_dwc = container_of(work,
+ struct hisi_dwc3_device, bc_again_work.work);
+
+ usb_dbg("+\n");
+ mutex_lock(&hisi_dwc->lock);
+
+ /* we are here because it's detected as SDP before */
+ if (hisi_dwc->charger_type == CHARGER_TYPE_SDP) {
+ usb_dbg("charger_type is SDP, start %s\n", __func__);
+ bc_again(hisi_dwc);
+ }
+
+ mutex_unlock(&hisi_dwc->lock);
+ usb_dbg("-\n");
+}
+
+static int conndone_notifier_fn(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret;
+ struct hisi_dwc3_device *hisi_dwc = container_of(nb,
+ struct hisi_dwc3_device, conndone_nb);
+
+ ret = cancel_delayed_work(&hisi_dwc->bc_again_work);
+ usb_dbg("cancel bc_again_work:%d\n", ret);
+
+ return 0;
+}
+
+/**
+ * get_usb_state() - get current USB cable state.
+ * @hisi_dwc: the instance pointer of struct hisi_dwc3_device
+ *
+ * return current USB cable state according to VBUS status and ID status.
+ */
+static enum hisi_usb_state get_usb_state(struct hisi_dwc3_device *hisi_dwc)
+{
+ if (hisi_dwc->fpga_flag) {
+ usb_dbg("this is fpga platform, usb is device mode\n");
+ return USB_STATE_DEVICE;
+ }
+
+ if (dwc3_otg_id_value(dwc_otg_handler) == 0)
+ return USB_STATE_HOST;
+ else
+ return USB_STATE_OFF;
+}
+
+static void get_phy_param(struct hisi_dwc3_device *hisi_dwc3)
+{
+ struct device *dev = &hisi_dwc3->pdev->dev;
+
+ /* hs phy param for device mode */
+ if (of_property_read_u32(dev->of_node, "eye_diagram_param",
+ &hisi_dwc3->eye_diagram_param)) {
+ usb_dbg("get eye diagram param from dt failed, use default value\n");
+ hisi_dwc3->eye_diagram_param = 0x1c466e3;
+ }
+ usb_dbg("eye diagram param: 0x%x\n", hisi_dwc3->eye_diagram_param);
+
+ /* hs phy param for host mode */
+ if (of_property_read_u32(dev->of_node, "eye_diagram_host_param",
+ &hisi_dwc3->eye_diagram_host_param)) {
+ usb_dbg("get eye diagram host param from dt failed, use default value\n");
+ hisi_dwc3->eye_diagram_host_param = 0x1c466e3;
+ }
+ usb_dbg("eye diagram host param: 0x%x\n",
+ hisi_dwc3->eye_diagram_host_param);
+
+ /* ss phy Rx Equalization */
+ if (of_property_read_u32(dev->of_node, "usb3_phy_cr_param",
+ &hisi_dwc3->usb3_phy_cr_param)) {
+ usb_dbg("get usb3_phy_cr_param from dt failed, use default value\n");
+ hisi_dwc3->usb3_phy_cr_param = (1 << 11) | (3 << 8) | (1 << 7);
+ }
+
+ /* ss phy Rx Equalization for host mode */
+ if (of_property_read_u32(dev->of_node, "usb3_phy_host_cr_param",
+ &hisi_dwc3->usb3_phy_host_cr_param)) {
+ usb_dbg("get usb3_phy_host_cr_param from dt failed, use default value\n");
+ hisi_dwc3->usb3_phy_host_cr_param =
+ (1 << 11) | (1 << 8) | (1 << 7);
+ }
+
+ usb_dbg("usb3_phy_cr_param: 0x%x\n", hisi_dwc3->usb3_phy_cr_param);
+ usb_dbg("usb3_phy_host_cr_param: 0x%x\n",
+ hisi_dwc3->usb3_phy_host_cr_param);
+
+ /* tx_vboost_lvl */
+ if (of_property_read_u32(dev->of_node, "usb3_phy_tx_vboost_lvl",
+ &hisi_dwc3->usb3_phy_tx_vboost_lvl)) {
+ usb_dbg("get usb3_phy_tx_vboost_lvl from dt failed, use default value\n");
+ hisi_dwc3->usb3_phy_tx_vboost_lvl = 5;
+ }
+ usb_dbg("usb3_phy_tx_vboost_lvl: %d\n",
+ hisi_dwc3->usb3_phy_tx_vboost_lvl);
+}
+
+/**
+ * get_resource() - prepare resources
+ * @hisi_dwc3: the instance pointer of struct hisi_dwc3_device
+ *
+ * 1. get registers base address and map registers region.
+ * 2. get regulator handler.
+ */
+static int get_resource(struct hisi_dwc3_device *hisi_dwc3)
+{
+ struct device *dev = &hisi_dwc3->pdev->dev;
+ struct resource *res;
+ struct device_node *np;
+
+ /*
+ * map PERI CRG region
+ */
+ np = of_find_compatible_node(NULL, NULL, "hisilicon,hi3660-crgctrl");
+ if (!np) {
+ dev_err(dev, "get peri cfg node failed!\n");
+ return -EINVAL;
+ }
+ hisi_dwc3->pericfg_reg_base = of_iomap(np, 0);
+ if (!hisi_dwc3->pericfg_reg_base) {
+ dev_err(dev, "iomap pericfg_reg_base failed!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map PCTRL region
+ */
+ np = of_find_compatible_node(NULL, NULL, "hisilicon,hi3660-pctrl");
+ if (!np) {
+ dev_err(dev, "get pctrl node failed!\n");
+ return -EINVAL;
+ }
+ hisi_dwc3->pctrl_reg_base = of_iomap(np, 0);
+ if (!hisi_dwc3->pctrl_reg_base) {
+ dev_err(dev, "iomap pctrl_reg_base failed!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map SCTRL region
+ */
+ np = of_find_compatible_node(NULL, NULL, "hisilicon,hi3660-sctrl");
+ if (!np) {
+ dev_err(dev, "get sysctrl node failed!\n");
+ return -EINVAL;
+ }
+ hisi_dwc3->sctrl_reg_base = of_iomap(np, 0);
+ if (!hisi_dwc3->sctrl_reg_base) {
+ dev_err(dev, "iomap sctrl_reg_base failed!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map PMCTRL region
+ */
+ np = of_find_compatible_node(NULL, NULL, "hisilicon,hi3660-pmctrl");
+ if (!np) {
+ dev_err(dev, "get pmctrl node failed!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map OTG BC region
+ */
+ res = platform_get_resource(hisi_dwc3->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory base resource\n");
+ return -EINVAL;
+ }
+
+ hisi_dwc3->otg_bc_reg_base = devm_ioremap_nocache(
+ dev, res->start, resource_size(res));
+ if (IS_ERR_OR_NULL(hisi_dwc3->otg_bc_reg_base)) {
+ dev_err(dev, "ioremap res 0 failed\n");
+ return -ENOMEM;
+ }
+
+ get_phy_param(hisi_dwc3);
+
+ /* get abb clk handler */
+ hisi_dwc3->clk = devm_clk_get(&hisi_dwc3->pdev->dev, "clk_usb3phy_ref");
+ if (IS_ERR_OR_NULL(hisi_dwc3->clk)) {
+ dev_err(dev, "get usb3phy ref clk failed\n");
+ return -EINVAL;
+ }
+
+ /* get h clk handler */
+ hisi_dwc3->gt_aclk_usb3otg = devm_clk_get(
+ &hisi_dwc3->pdev->dev, "aclk_usb3otg");
+ if (IS_ERR_OR_NULL(hisi_dwc3->gt_aclk_usb3otg)) {
+ dev_err(dev, "get aclk_usb3otg failed\n");
+ return -EINVAL;
+ }
+
+ /* judge fpga platform or not, from dts */
+ if (of_property_read_u32(dev->of_node, "fpga_flag",
+ &hisi_dwc3->fpga_flag)) {
+ hisi_dwc3->fpga_flag = 0;
+ }
+ usb_dbg("this is %s platform (fpga flag %d)\n",
+ hisi_dwc3->fpga_flag ? "fpga" : "asic", hisi_dwc3->fpga_flag);
+
+ hisi_dwc3->fpga_usb_mode_gpio = -1;
+
+ if (of_property_read_u32(dev->of_node, "bc_again_flag",
+ &hisi_dwc3->bc_again_flag)) {
+ hisi_dwc3->bc_again_flag = 0;
+ }
+
+ return 0;
+}
+
+static int hisi_dwc3_phy_init(struct hisi_dwc3_device *hisi_dwc)
+{
+ return hisi_dwc->phy_ops->init(hisi_dwc);
+}
+
+static int hisi_dwc3_phy_shutdown(struct hisi_dwc3_device *hisi_dwc)
+{
+ return hisi_dwc->phy_ops->shutdown(hisi_dwc);
+}
+
+int hisi_dwc3_probe(struct platform_device *pdev,
+ struct usb3_phy_ops *phy_ops)
+{
+ int ret;
+ struct hisi_dwc3_device *hisi_dwc;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ enum hisi_usb_state init_state;
+
+ usb_dbg("+\n");
+
+ if (!phy_ops) {
+ usb_err("phy_ops is NULL\n");
+ return -EINVAL;
+ }
+
+ hisi_dwc = devm_kzalloc(dev, sizeof(*hisi_dwc), GFP_KERNEL);
+ if (!hisi_dwc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hisi_dwc);
+ hisi_dwc->pdev = pdev;
+ hisi_dwc->phy_ops = phy_ops;
+
+ hisi_dwc3_dev = hisi_dwc;
+
+ /*
+ * set the hisi dwc3 DMA mask to 32 bits (0xffffffff): the USB AHB
+ * master only supports 32-bit addresses.
+ */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ /*
+ * get resources from dts.
+ */
+ ret = get_resource(hisi_dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "get resource failed!\n");
+ return ret;
+ }
+
+ if (hisi_dwc->fpga_usb_mode_gpio > 0) {
+ ret = gpio_request(hisi_dwc->fpga_usb_mode_gpio, NULL);
+ if (ret) {
+ /* request gpio failure! */
+ usb_err("request gpio %d failed, ret=[%d]\n",
+ hisi_dwc->fpga_usb_mode_gpio, ret);
+ }
+ }
+
+ /* create sysfs files. */
+ ret = create_attr_file(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "create_attr_file failed!\n");
+ return ret;
+ }
+
+ /* initialize */
+ hisi_dwc->charger_type = CHARGER_TYPE_SDP;
+ hisi_dwc->fake_charger_type = CHARGER_TYPE_NONE;
+ hisi_dwc->event = NONE_EVENT;
+ hisi_dwc->host_flag = 0;
+ hisi_dwc->eventmask = 0;
+ spin_lock_init(&hisi_dwc->event_lock);
+ INIT_WORK(&hisi_dwc->event_work, event_work);
+ mutex_init(&hisi_dwc->lock);
+ wakeup_source_init(&hisi_dwc->ws, "usb_wake_lock");
+ ATOMIC_INIT_NOTIFIER_HEAD(&hisi_dwc->charger_type_notifier);
+ event_queue_creat(&hisi_dwc->event_queue, MAX_EVENT_COUNT);
+ hisi_dwc->disable_vdp_src = disable_vdp_src;
+ hisi_dwc->need_disable_vdp = 0;
+
+ /* power on */
+ hisi_dwc->is_regu_on = 0;
+ ret = hisi_dwc3_phy_init(hisi_dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: hisi_dwc3_phy_init failed!\n",
+ __func__);
+ remove_attr_file(dev);
+ return ret;
+ }
+
+ if (hisi_dwc->bc_again_flag) {
+ INIT_DELAYED_WORK(&hisi_dwc->bc_again_work, bc_again_work);
+ hisi_dwc->conndone_nb.notifier_call = conndone_notifier_fn;
+ ret = dwc3_conndone_notifier_register(&hisi_dwc->conndone_nb);
+ if (ret)
+ usb_err("dwc3_conndone_notifier_register failed\n");
+ }
+
+ if (hisi_dwc->charger_type == CHARGER_TYPE_CDP) {
+ usb_dbg("it needs enable VDP_SRC while detect CDP!\n");
+ hisi_dwc->need_disable_vdp = 1;
+ enable_vdp_src(hisi_dwc);
+ }
+
+ /*
+ * enable runtime PM and take a reference so the device stays active
+ * until the end of probe (released by put_sync()/allow() below).
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ pm_runtime_forbid(dev);
+
+ /*
+ * probe child devices
+ */
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ pr_err("%s: register dwc3 failed!\n", __func__);
+ goto err1;
+ }
+
+#ifdef CONFIG_USB_DWC3_OTG
+ /* default device state */
+ hisi_dwc->state = USB_STATE_DEVICE;
+
+ if (sleep_allowed(hisi_dwc))
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ else
+ hisi_dwc3_wake_lock(hisi_dwc);
+
+ if (!enumerate_allowed(hisi_dwc)) {
+ /* stop peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler, DWC3_OTG_EVT_VBUS_CLEAR);
+ if (ret)
+ usb_err("stop peripheral error\n");
+ }
+
+ /* balance the put operation when disconnect */
+ pm_runtime_get(dev);
+
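+ /* sync the OTG state machine with the cable/ID state found at boot */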
+ hisi_dwc->event = CHARGER_CONNECT_EVENT;
+ init_state = get_usb_state(hisi_dwc);
+ if (init_state == USB_STATE_OFF) {
+ usb_dbg("init state: OFF\n");
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ } else if (init_state == USB_STATE_HOST) {
+ usb_dbg("init state: HOST\n");
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ msleep(500);
+ hisi_usb_otg_event(ID_FALL_EVENT);
+ }
+#endif
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_allow(dev);
+
+ usb_dbg("-\n");
+
+ return 0;
+
+err1:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ remove_attr_file(dev);
+
+ return ret;
+}
+
+static int hisi_dwc3_remove_child(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+int hisi_dwc3_remove(struct platform_device *pdev)
+{
+ struct hisi_dwc3_device *hisi_dwc3 = platform_get_drvdata(pdev);
+ int ret;
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return -EBUSY;
+ }
+
+ device_for_each_child(&pdev->dev, NULL, hisi_dwc3_remove_child);
+ pm_runtime_disable(&pdev->dev);
+
+ if (hisi_dwc3->bc_again_flag) {
+ dwc3_conndone_notifier_unregister(&hisi_dwc3->conndone_nb);
+ hisi_dwc3->conndone_nb.notifier_call = NULL;
+ }
+
+ ret = hisi_dwc3_phy_shutdown(hisi_dwc3);
+ if (ret)
+ usb_err("hisi_dwc3_phy_shutdown error\n");
+ hisi_dwc3->phy_ops = NULL;
+
+ event_queue_destroy(&hisi_dwc3->event_queue);
+
+ remove_attr_file(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int hisi_dwc3_prepare(struct device *dev)
+{
+ struct hisi_dwc3_device *hisi_dwc = platform_get_drvdata(
+ to_platform_device(dev));
+ int ret = 0;
+
+ if (!hisi_dwc)
+ return -ENODEV;
+
+ mutex_lock(&hisi_dwc->lock);
+
+ switch (hisi_dwc->state) {
+ case USB_STATE_OFF:
+ pr_info("%s: off state.\n", __func__);
+ break;
+ case USB_STATE_DEVICE:
+ pr_info("%s: device state.\n", __func__);
+
+ if (enumerate_allowed(hisi_dwc)) {
+ /* stop peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_VBUS_CLEAR);
+ if (ret) {
+ usb_err("stop peripheral error\n");
+ goto error;
+ }
+ } else {
+ usb_dbg("connected is a real charger\n");
+ disable_vdp_src(hisi_dwc);
+ }
+
+ break;
+ case USB_STATE_HOST:
+ usb_err("%s: host mode, should not go to sleep!\n", __func__);
+ ret = -EFAULT;
+ goto error;
+ default:
+ pr_err("%s: ilegal state!\n", __func__);
+ ret = -EFAULT;
+ goto error;
+ }
+
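+ /*
+ * Note: on success the mutex is intentionally left held here; it is
+ * released in hisi_dwc3_complete() after resume.
+ */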
+ return ret;
+error:
+ mutex_unlock(&hisi_dwc->lock);
+ return ret;
+}
+
+static void hisi_dwc3_complete(struct device *dev)
+{
+ struct hisi_dwc3_device *hisi_dwc = platform_get_drvdata(
+ to_platform_device(dev));
+ int ret = 0;
+
+ if (!hisi_dwc) {
+ usb_err("hisi_dwc NULL !\n");
+ return;
+ }
+
+ switch (hisi_dwc->state) {
+ case USB_STATE_OFF:
+ usb_dbg("%s: off state.\n", __func__);
+ break;
+ case USB_STATE_DEVICE:
+ usb_dbg("%s: device state.\n", __func__);
+
+ /* update charger type */
+ hisi_dwc->charger_type = detect_charger_type(hisi_dwc);
+ if (sleep_allowed(hisi_dwc))
+ hisi_dwc3_wake_unlock(hisi_dwc);
+ else
+ hisi_dwc3_wake_lock(hisi_dwc);
+
+ /* do not start peripheral if real charger connected */
+ if (enumerate_allowed(hisi_dwc)) {
+ /* start peripheral */
+ ret = dwc3_otg_work(dwc_otg_handler,
+ DWC3_OTG_EVT_VBUS_SET);
+ if (ret) {
+ usb_err("start peripheral error\n");
+ hisi_dwc->state = USB_STATE_OFF;
+ pm_runtime_put(&hisi_dwc->pdev->dev);
+ goto error;
+ }
+ } else {
+ usb_dbg("a real charger connected\n");
+ }
+
+ break;
+ case USB_STATE_HOST:
+ usb_err("%s: host mode, should not go to sleep!\n", __func__);
+ break;
+ default:
+ usb_err("%s: ilegal state!\n", __func__);
+ break;
+ }
+
+error:
+ mutex_unlock(&hisi_dwc->lock);
+}
+
+static int hisi_dwc3_suspend(struct device *dev)
+{
+ struct hisi_dwc3_device *hisi_dwc3 =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+
+ usb_dbg("+\n");
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return -EBUSY;
+ }
+
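+ /* if already runtime suspended, the PHY is off and there is nothing to do */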
+ if (hisi_dwc3->runtime_suspended) {
+ usb_dbg("runtime_suspended\n");
+ } else {
+ ret = hisi_dwc3_phy_shutdown(hisi_dwc3);
+ if (ret)
+ usb_err("hisi_dwc3_phy_shutdown failed\n");
+ }
+
+ usb_dbg("-\n");
+
+ return ret;
+}
+
+static int hisi_dwc3_resume(struct device *dev)
+{
+ struct hisi_dwc3_device *hisi_dwc3 =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+
+ usb_dbg("+\n");
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return -EBUSY;
+ }
+
+ if (hisi_dwc3->runtime_suspended) {
+ usb_dbg("runtime_suspended\n");
+ } else {
+ ret = hisi_dwc3_phy_init(hisi_dwc3);
+ if (ret)
+ usb_err("hisi_dwc3_phy_init failed\n");
+
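+ /* resync the runtime PM status now that the PHY is powered again */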
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ usb_dbg("-\n");
+
+ return ret;
+}
+#endif
+
+static int hisi_dwc3_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct hisi_dwc3_device *hisi_dwc3 =
+ platform_get_drvdata(to_platform_device(dev));
+
+ usb_dbg("+\n");
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return -EBUSY;
+ }
+
+ ret = hisi_dwc3_phy_shutdown(hisi_dwc3);
+ if (ret)
+ return ret;
+ hisi_dwc3->runtime_suspended = 1;
+ usb_dbg("-\n");
+
+ return 0;
+}
+
+static int hisi_dwc3_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct hisi_dwc3_device *hisi_dwc3 =
+ platform_get_drvdata(to_platform_device(dev));
+
+ usb_dbg("+\n");
+
+ if (!hisi_dwc3) {
+ usb_err("hisi_dwc3 NULL\n");
+ return -EBUSY;
+ }
+
+ ret = hisi_dwc3_phy_init(hisi_dwc3);
+ if (ret)
+ return ret;
+ hisi_dwc3->runtime_suspended = 0;
+ usb_dbg("-\n");
+
+ return ret;
+}
+
+static int hisi_dwc3_runtime_idle(struct device *dev)
+{
+ int ret;
+
+ usb_dbg("+\n");
+ ret = pm_runtime_autosuspend(dev);
+ if (ret)
+ dev_err(dev, "pm_runtime_autosuspend error\n");
+ usb_dbg("-\n");
+
+ return ret;
+}
+
+const struct dev_pm_ops hisi_dwc3_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .prepare = hisi_dwc3_prepare,
+ .complete = hisi_dwc3_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(hisi_dwc3_suspend, hisi_dwc3_resume)
+#endif
+ SET_RUNTIME_PM_OPS(hisi_dwc3_runtime_suspend, hisi_dwc3_runtime_resume,
+ hisi_dwc3_runtime_idle)
+};
+#endif
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("wangbinghui <wangbinghui@hisilicon.com>");
diff --git a/drivers/usb/dwc3/dwc3-hisi.h b/drivers/usb/dwc3/dwc3-hisi.h
new file mode 100644
index 000000000000..f497baff563a
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-hisi.h
@@ -0,0 +1,293 @@
+/*
+ * dwc3-hisi.h
+ *
+ * Copyright: (C) 2008-2018 hisilicon.
+ * Contact: wangbinghui<wangbinghui@hisilicon.com>
+ *
+ * USB vbus for Hisilicon device
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ */
+#ifndef _DWC3_HISI_H_
+#define _DWC3_HISI_H_
+
+#include <linux/pm_wakeup.h>
+#include <linux/clk.h>
+#include <linux/hisi/usb/hisi_usb.h>
+#include <linux/regulator/consumer.h>
+
+#define REG_BASE_PERI_CRG (0xFFF35000)
+#define PERI_CRG_CLK_EN4 (0x40)
+#define PERI_CRG_CLK_DIS4 (0x44)
+#define PERI_CRG_RSTDIS4 (0x94)
+#define PERI_CRG_RSTEN4 (0x90)
+#define PERI_CRG_ISODIS (0x148)
+#define PERI_CRG_ISOSTAT (0x14C)
+#define STCL_ADDR (0xFFF0A214)
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+#define PERI_CRG_ISOSTAT_MODEMSUBSYSISOEN BIT(4)
+#define PERI_CRG_ISODIS_MODEMSUBSYSISOEN BIT(4)
+
+#define PCTRL_PERI_CTRL24 (0x64)
+#define PCTRL_PERI_CTRL48 (0xC54)
+
+#define IP_RST_USB3OTG_MUX BIT(8)
+#define IP_RST_USB3OTG_AHBIF BIT(7)
+#define IP_RST_USB3OTG_32K BIT(6)
+#define IP_RST_USB3OTG BIT(5)
+#define IP_RST_USB3OTGPHY_POR BIT(3)
+
+#define GT_CLK_USB3OTG_REF BIT(0)
+#define GT_ACLK_USB3OTG BIT(1)
+#define GT_CLK_USB3PHY_REF BIT(2)
+
+/*
+ * hisi dwc3 phy registers
+ */
+#define DWC3_PHY_RX_OVRD_IN_HI 0x1006
+#define DWC3_PHY_RX_SCOPE_VDCC 0x1026
+
+/* DWC3_PHY_RX_SCOPE_VDCC */
+#define RX_SCOPE_LFPS_EN BIT(0)
+
+/*
+ * hisi dwc3 otg bc registers
+ */
+#define USBOTG3_CTRL0 0x00
+#define USBOTG3_CTRL1 0x04
+#define USBOTG3_CTRL2 0x08
+#define USBOTG3_CTRL3 0x0C
+#define USBOTG3_CTRL4 0x10
+#define USBOTG3_CTRL5 0x14
+#define USBOTG3_CTRL6 0x18
+#define USBOTG3_CTRL7 0x1C
+#define USBOTG3_STS0 0x20
+#define USBOTG3_STS1 0x24
+#define USBOTG3_STS2 0x28
+#define USBOTG3_STS3 0x2C
+#define BC_CTRL0 0x30
+#define BC_CTRL1 0x34
+#define BC_CTRL2 0x38
+#define BC_STS0 0x3C
+#define RAM_CTRL 0x40
+#define USBOTG3_STS4 0x44
+#define USB3PHY_CTRL 0x48
+#define USB3PHY_STS 0x4C
+#define USB3PHY_CR_STS 0x50
+#define USB3PHY_CR_CTRL 0x54
+#define USB3_RES 0x58
+
+/* USBOTG3_CTRL0 */
+# define USBOTG3CTRL0_SESSVLD_SEL BIT(14)
+# define USBOTG3CTRL0_SC_SESSVLD BIT(13)
+# define USBOTG3CTRL0_POWERPRESENT_SEL BIT(12)
+# define USBOTG3CTRL0_SC_POWERPRESENT BIT(11)
+# define USBOTG3CTRL0_BVALID_SEL BIT(10)
+# define USBOTG3CTRL0_SC_BVALID BIT(9)
+# define USBOTG3CTRL0_AVALID_SEL BIT(8)
+# define USBOTG3CTRL0_SC_AVALID BIT(7)
+# define USBOTG3CTRL0_VBUSVALID_SEL BIT(6)
+# define USBOTG3CTRL0_DRVVBUS BIT(5)
+# define USBOTG3CTRL0_DRVVBUS_SEL BIT(4)
+# define USBOTG3CTRL0_IDDIG BIT(3)
+# define USBOTG3CTRL0_IDDIG_SEL BIT(2)
+# define USBOTG3CTRL0_IDPULLUP BIT(1)
+# define USBOTG3CTRL0_IDPULLUP_SEL BIT(0)
+
+/* USBOTG3_CTRL2 */
+# define USBOTG3CTRL2_POWERDOWN_HSP BIT(0)
+# define USBOTG3CTRL2_POWERDOWN_SSP BIT(1)
+
+/* USBOTG3_CTRL3 */
+# define USBOTG3_CTRL3_VBUSVLDEXT BIT(6)
+# define USBOTG3_CTRL3_VBUSVLDEXTSEL BIT(5)
+# define USBOTG3_CTRL3_TXBITSTUFFEHN BIT(4)
+# define USBOTG3_CTRL3_TXBITSTUFFEN BIT(3)
+# define USBOTG3_CTRL3_RETENABLEN BIT(2)
+# define USBOTG3_CTRL3_OTGDISABLE BIT(1)
+# define USBOTG3_CTRL3_COMMONONN BIT(0)
+
+/* USBOTG3_CTRL4 */
+# define USBOTG3_CTRL4_TXVREFTUNE(x) (((x) << 22) & (0xf << 22))
+# define USBOTG3_CTRL4_TXRISETUNE(x) (((x) << 20) & (3 << 20))
+# define USBOTG3_CTRL4_TXRESTUNE(x) (((x) << 18) & (3 << 18))
+# define USBOTG3_CTRL4_TXPREEMPPULSETUNE BIT(17)
+# define USBOTG3_CTRL4_TXPREEMPAMPTUNE(x) (((x) << 15) & (3 << 15))
+# define USBOTG3_CTRL4_TXHSXVTUNE(x) (((x) << 13) & (3 << 13))
+# define USBOTG3_CTRL4_TXFSLSTUNE(x) (((x) << 9) & (0xf << 9))
+# define USBOTG3_CTRL4_SQRXTUNE(x) (((x) << 6) & (7 << 6))
+# define USBOTG3_CTRL4_OTGTUNE_MASK (7 << 3)
+# define USBOTG3_CTRL4_OTGTUNE(x) \
+(((x) << 3) & USBOTG3_CTRL4_OTGTUNE_MASK)
+# define USBOTG3_CTRL4_COMPDISTUNE_MASK 7
+# define USBOTG3_CTRL4_COMPDISTUNE(x) \
+((x) & USBOTG3_CTRL4_COMPDISTUNE_MASK)
+
+# define USBOTG3_CTRL7_REF_SSP_EN BIT(16)
+
+/* USBOTG3_CTRL6 */
+#define TX_VBOOST_LVL_MASK 7
+#define TX_VBOOST_LVL(x) ((x) & TX_VBOOST_LVL_MASK)
+
+/* BC_CTRL0 */
+# define BC_CTRL0_BC_IDPULLUP BIT(10)
+# define BC_CTRL0_BC_SUSPEND_N BIT(9)
+# define BC_CTRL0_BC_DMPULLDOWN BIT(8)
+# define BC_CTRL0_BC_DPPULLDOWN BIT(7)
+# define BC_CTRL0_BC_TXVALIDH BIT(6)
+# define BC_CTRL0_BC_TXVALID BIT(5)
+# define BC_CTRL0_BC_TERMSELECT BIT(4)
+# define BC_CTRL0_BC_XCVRSELECT(x) (((x) << 2) & (3 << 2))
+# define BC_CTRL0_BC_OPMODE(x) ((x) & 3)
+
+/* BC_CTRL1 */
+# define BC_CTRL1_BC_MODE 1
+
+/* BC_CTRL2 */
+# define BC_CTRL2_BC_PHY_VDATDETENB BIT(4)
+# define BC_CTRL2_BC_PHY_VDATARCENB BIT(3)
+# define BC_CTRL2_BC_PHY_CHRGSEL BIT(2)
+# define BC_CTRL2_BC_PHY_DCDENB BIT(1)
+# define BC_CTRL2_BC_PHY_ACAENB BIT(0)
+
+/* BC_STS0 */
+# define BC_STS0_BC_LINESTATE(x) (((x) << 9) & (3 << 9))
+# define BC_STS0_BC_PHY_CHGDET BIT(8)
+# define BC_STS0_BC_PHY_FSVMINUS BIT(7)
+# define BC_STS0_BC_PHY_FSVPLUS BIT(6)
+# define BC_STS0_BC_RID_GND BIT(5)
+# define BC_STS0_BC_RID_FLOAT BIT(4)
+# define BC_STS0_BC_RID_C BIT(3)
+# define BC_STS0_BC_RID_B BIT(2)
+# define BC_STS0_BC_RID_A BIT(1)
+# define BC_STS0_BC_SESSVLD BIT(0)
+
+/* USB3PHY_CR_STS */
+#define USB3OTG_PHY_CR_DATA_OUT(x) (((x) >> 1) & 0xffff)
+#define USB3OTG_PHY_CR_ACK BIT(0)
+
+/* USB3PHY_CR_CTRL */
+#define USB3OTG_PHY_CR_DATA_IN(x) (((x) << 4) & (0xffff << 4))
+#define USB3OTG_PHY_CR_WRITE BIT(3)
+#define USB3OTG_PHY_CR_READ BIT(2)
+#define USB3OTG_PHY_CR_CAP_DATA BIT(1)
+#define USB3OTG_PHY_CR_CAP_ADDR BIT(0)
+
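+/* note: usb_dbg maps to pr_err as well, so debug traces are always printed */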
+#define usb_dbg(format, arg...) \
+ pr_err("[USB3][%s]"format, __func__, ##arg)
+
+#define usb_err(format, arg...) \
+ pr_err("[USB3][%s]"format, __func__, ##arg)
+
+enum hisi_usb_state {
+ USB_STATE_UNKNOWN = 0,
+ USB_STATE_OFF,
+ USB_STATE_DEVICE,
+ USB_STATE_HOST,
+};
+
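+/*
+ * Queue of pending otg_dev_event_type events, created by event_queue_creat()
+ * in probe and released by event_queue_destroy() in remove.
+ */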
+struct hiusb_event_queue {
+ enum otg_dev_event_type *event;
+ unsigned int num_event;
+ unsigned int max_event;
+ unsigned int enpos, depos;
+ unsigned int overlay, overlay_index;
+};
+
+#define MAX_EVENT_COUNT 16
+#define EVENT_QUEUE_UNIT MAX_EVENT_COUNT
+
+struct hisi_dwc3_device {
+ struct platform_device *pdev;
+
+ void __iomem *otg_bc_reg_base;
+ void __iomem *pericfg_reg_base;
+ void __iomem *pctrl_reg_base;
+ void __iomem *sctrl_reg_base;
+
+ struct regulator *usb_regu;
+ unsigned int is_regu_on;
+ unsigned int runtime_suspended;
+
+ enum hisi_usb_state state;
+ enum hisi_charger_type charger_type;
+ enum hisi_charger_type fake_charger_type;
+
+ enum otg_dev_event_type event;
+ spinlock_t event_lock;
+
+ struct mutex lock;
+ struct wakeup_source ws;
+ struct atomic_notifier_head charger_type_notifier;
+ struct work_struct event_work;
+
+ u32 eye_diagram_param; /* this param will be set to USBOTG3_CTRL4 */
+ u32 eye_diagram_host_param;
+ u32 usb3_phy_cr_param;
+ u32 usb3_phy_host_cr_param;
+ u32 usb3_phy_tx_vboost_lvl;
+ unsigned int host_flag;
+
+ u32 fpga_flag;
+ int fpga_usb_mode_gpio;
+
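+ /* "clk_usb3phy_ref" and "aclk_usb3otg" clock handles acquired in get_resource() */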
+ struct clk *clk;
+ struct clk *gt_aclk_usb3otg;
+
+ int eventmask;
+
+ /* for bc again */
+ u32 bc_again_flag;
+ struct delayed_work bc_again_work;
+ struct notifier_block conndone_nb;
+
+ /* event queue for handle event */
+ struct hiusb_event_queue event_queue;
+
+ struct usb3_phy_ops *phy_ops;
+
+ unsigned int need_disable_vdp;
+ void (*disable_vdp_src)(struct hisi_dwc3_device *hisi_dwc3);
+};
+
+#ifdef CONFIG_PM
+extern const struct dev_pm_ops hisi_dwc3_dev_pm_ops;
+#define HISI_DWC3_PM_OPS (&hisi_dwc3_dev_pm_ops)
+#else
+#define HISI_DWC3_PM_OPS NULL
+#endif
+
+struct usb3_phy_ops {
+ struct regulator *subsys_regu;
+
+ int (*init)(struct hisi_dwc3_device *hisi_dwc3);
+ int (*shutdown)(struct hisi_dwc3_device *hisi_dwc3);
+};
+
+typedef ssize_t (*hiusb_debug_show_ops)(void *, char *, ssize_t);
+typedef ssize_t (*hiusb_debug_store_ops)(void *, const char *, ssize_t);
+void hiusb_debug_init(void *data);
+void hiusb_debug_quick_register(void *dev_data,
+ hiusb_debug_show_ops show,
+ hiusb_debug_store_ops store);
+
+void set_hisi_dwc3_power_flag(int val);
+void config_femtophy_param(struct hisi_dwc3_device *hisi_dwc);
+int hisi_dwc3_probe(struct platform_device *pdev, struct usb3_phy_ops *phy_ops);
+int hisi_dwc3_remove(struct platform_device *pdev);
+#endif /* _DWC3_HISI_H_ */
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 72664700b8a2..12ee23f53cdd 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev)
return PTR_ERR(kdwc->usbss);
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+ if (IS_ERR(kdwc->clk)) {
+ dev_err(kdwc->dev, "unable to get usb clock\n");
+ return PTR_ERR(kdwc->clk);
+ }
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 35b63518baf6..7ebf7953ac9c 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -79,40 +79,40 @@
#define USBOTGSS_DEBUG_OFFSET 0x0600
/* SYSCONFIG REGISTER */
-#define USBOTGSS_SYSCONFIG_DMADISABLE (1 << 16)
+#define USBOTGSS_SYSCONFIG_DMADISABLE BIT(16)
/* IRQ_EOI REGISTER */
-#define USBOTGSS_IRQ_EOI_LINE_NUMBER (1 << 0)
+#define USBOTGSS_IRQ_EOI_LINE_NUMBER BIT(0)
/* IRQS0 BITS */
-#define USBOTGSS_IRQO_COREIRQ_ST (1 << 0)
+#define USBOTGSS_IRQO_COREIRQ_ST BIT(0)
/* IRQMISC BITS */
-#define USBOTGSS_IRQMISC_DMADISABLECLR (1 << 17)
-#define USBOTGSS_IRQMISC_OEVT (1 << 16)
-#define USBOTGSS_IRQMISC_DRVVBUS_RISE (1 << 13)
-#define USBOTGSS_IRQMISC_CHRGVBUS_RISE (1 << 12)
-#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE (1 << 11)
-#define USBOTGSS_IRQMISC_IDPULLUP_RISE (1 << 8)
-#define USBOTGSS_IRQMISC_DRVVBUS_FALL (1 << 5)
-#define USBOTGSS_IRQMISC_CHRGVBUS_FALL (1 << 4)
-#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3)
-#define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0)
+#define USBOTGSS_IRQMISC_DMADISABLECLR BIT(17)
+#define USBOTGSS_IRQMISC_OEVT BIT(16)
+#define USBOTGSS_IRQMISC_DRVVBUS_RISE BIT(13)
+#define USBOTGSS_IRQMISC_CHRGVBUS_RISE BIT(12)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE BIT(11)
+#define USBOTGSS_IRQMISC_IDPULLUP_RISE BIT(8)
+#define USBOTGSS_IRQMISC_DRVVBUS_FALL BIT(5)
+#define USBOTGSS_IRQMISC_CHRGVBUS_FALL BIT(4)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL BIT(3)
+#define USBOTGSS_IRQMISC_IDPULLUP_FALL BIT(0)
/* UTMI_OTG_STATUS REGISTER */
-#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS (1 << 5)
-#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS (1 << 4)
-#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS (1 << 3)
-#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP (1 << 0)
+#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS BIT(5)
+#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS BIT(4)
+#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS BIT(3)
+#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP BIT(0)
/* UTMI_OTG_CTRL REGISTER */
-#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE (1 << 31)
-#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT (1 << 9)
-#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8)
-#define USBOTGSS_UTMI_OTG_CTRL_IDDIG (1 << 4)
-#define USBOTGSS_UTMI_OTG_CTRL_SESSEND (1 << 3)
-#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID (1 << 2)
-#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID (1 << 1)
+#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE BIT(31)
+#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT BIT(9)
+#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE BIT(8)
+#define USBOTGSS_UTMI_OTG_CTRL_IDDIG BIT(4)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSEND BIT(3)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2)
+#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1)
struct dwc3_omap {
struct device *dev;
@@ -393,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
{
u32 reg;
struct device_node *node = omap->dev->of_node;
- int utmi_mode = 0;
+ u32 utmi_mode = 0;
reg = dwc3_omap_read_utmi_ctrl(omap);
diff --git a/drivers/usb/dwc3/dwc3-otg.c b/drivers/usb/dwc3/dwc3-otg.c
new file mode 100644
index 000000000000..34a082ea96e3
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-otg.c
@@ -0,0 +1,362 @@
+/*
+ * dwc3-otg.c
+ *
+ * Copyright: (C) 2008-2018 hisilicon.
+ * Contact: wangbinghui<wangbinghui@hisilicon.com>
+ *
+ * USB vbus for Hisilicon device
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "io.h"
+#include "dwc3-otg.h"
+
+#define DBG(format, arg...) pr_info("[%s]" format, __func__, ##arg)
+
+struct dwc3_otg *dwc_otg_handler;
+
+static void dump_otg_regs(struct dwc3 *dwc)
+{
+#define DUMP_REG(__reg) pr_info("%s:\t0x%x\n", \
+ #__reg, dwc3_readl(dwc->regs, __reg))
+ DUMP_REG(DWC3_OCFG);
+ DUMP_REG(DWC3_OCTL);
+ DUMP_REG(DWC3_OEVT);
+ DUMP_REG(DWC3_OEVTEN);
+ DUMP_REG(DWC3_OSTS);
+
+ DUMP_REG(DWC3_BCFG);
+ DUMP_REG(DWC3_BCEVT);
+ DUMP_REG(DWC3_BCEVTEN);
+}
+
+#ifndef DWC3_OTG_FORCE_MODE
+static void dwc3_disable_otg_event(struct dwc3 *dwc)
+{
+ dwc3_writel(dwc->regs, DWC3_OEVT, 0x0ffffff0);
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, 0);
+}
+
+static void dwc3_enable_otg_event(struct dwc3 *dwc)
+{
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, 0);
+ dwc3_writel(dwc->regs, DWC3_OEVT, 0x0ffffff0);
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, DWC3_OEVT_OTGBDEVVBUSCHNGEVNT |
+ DWC3_OEVT_OTGCONIDSTSCHNGEVNT);
+}
+#endif
+
+int dwc3_otg_resume(struct dwc3 *dwc)
+{
+ DBG("+\n");
+#ifndef DWC3_OTG_FORCE_MODE
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_OSTS);
+ if (reg & DWC3_OSTS_CONIDSTS) {
+ DBG("%s: ID is 1, set peripheral mode\n", __func__);
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PERIMODE;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+ } else {
+ DBG("%s: ID is 0, clear peripheral mode\n", __func__);
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~DWC3_OCTL_PERIMODE;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+ }
+#endif
+
+ DBG("-\n");
+
+ return 0;
+}
+
+int dwc3_otg_suspend(struct dwc3 *dwc)
+{
+ DBG("+\n");
+ DBG("-\n");
+ return 0;
+}
+
+static int dwc3_otg_start_host(struct dwc3_otg *dwc_otg)
+{
+ struct dwc3 *dwc = dwc_otg->dwc;
+ unsigned long flags;
+ int ret;
+ u32 reg;
+
+ DBG("+\n");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+#ifdef DWC3_OTG_FORCE_MODE
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ pr_debug("%s: GCTL value 0x%x\n", __func__, reg);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+#else
+ /* check ID status: CONIDSTS should be 0 (A-device) before starting the host */
+ DBG("+before read DWC3_OSTS\n");
+ reg = dwc3_readl(dwc->regs, DWC3_OSTS);
+ if (reg & DWC3_OSTS_CONIDSTS) {
+ pr_warn("%s: CONIDSTS wrong!\n");
+ dump_otg_regs(dwc);
+ }
+ DBG("+before read DWC3_OCFG\n");
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ reg |= DWC3_OCFG_OTGSFTRSTMSK;
+ reg |= DWC3_OCFG_DISPRTPWRCUTOFF;
+ reg &= ~(DWC3_OCFG_HNPCAP | DWC3_OCFG_SRPCAP);
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+
+ DBG("set OCFG 0x%x\n", dwc3_readl(dwc->regs, DWC3_OCFG));
+
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~DWC3_OCTL_PERIMODE;
+ reg |= DWC3_OCTL_PRTPWRCTL;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+
+ DBG("set OCTL 0x%x\n", dwc3_readl(dwc->regs, DWC3_OCTL));
+#endif
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ ret = platform_device_add(dwc->xhci);
+ if (ret) {
+ pr_err("%s: failed to register xHCI device\n", __func__);
+ return ret;
+ }
+
+#ifdef CONFIG_HISI_USB_DWC3_MASK_IRQ_WORKAROUND
+ if (dwc->irq_state == 0) {
+ enable_irq(dwc->irq);
+ dwc->irq_state = 1;
+ pr_info("[%s]enable irq\n", __func__);
+ }
+#endif
+
+ DBG("-\n");
+
+ return ret;
+}
+
+static void dwc3_otg_stop_host(struct dwc3_otg *dwc_otg)
+{
+ DBG("+\n");
+ platform_device_del(dwc_otg->dwc->xhci);
+ DBG("-\n");
+}
+
+static int dwc3_otg_start_peripheral(struct dwc3_otg *dwc_otg)
+{
+ int ret;
+ unsigned long flags;
+ struct dwc3 *dwc = dwc_otg->dwc;
+ u32 reg;
+
+ DBG("+\n");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+#ifdef DWC3_OTG_FORCE_MODE
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ pr_debug("%s: GCTL value 0x%x\n", __func__, reg);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+#else
+ reg = dwc3_readl(dwc->regs, DWC3_OSTS);
+ if (!(reg & DWC3_OSTS_CONIDSTS) || !(reg & DWC3_OSTS_BSESVLD)) {
+ pr_warn("%s: CONIDSTS or BSESVLD wrong!\n");
+ dump_otg_regs(dwc);
+ }
+
+ /* set mode as peripheral */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PERIMODE;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+#endif
+
+ ret = dwc3_gadget_resume(dwc);
+ if (ret)
+ pr_err("[%s] gadget resume error!", __func__);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ DBG("-\n");
+
+ return ret;
+}
+
+static int dwc3_otg_stop_peripheral(struct dwc3_otg *dwc_otg)
+{
+ int ret;
+ unsigned long flags;
+ struct dwc3 *dwc = dwc_otg->dwc;
+
+ DBG("+\n");
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ ret = dwc3_gadget_suspend(dwc);
+ if (ret)
+ pr_err("[%s] gadget suspend error!", __func__);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ DBG("-\n");
+
+ return ret;
+}
+
+int dwc3_otg_id_value(struct dwc3_otg *dwc_otg)
+{
+ if (dwc_otg)
+ return !!(dwc3_readl(dwc_otg->dwc->regs, DWC3_OSTS)
+ & DWC3_OSTS_CONIDSTS);
+ else
+ return 1;
+}
+
+int dwc3_otg_work(struct dwc3_otg *dwc_otg, int evt)
+{
+ int ret = 0;
+
+ DBG("+\n evt = %d", evt);
+
+ /* if otg is not enabled, do nothing */
+ if (!dwc_otg) {
+ pr_info("%s: dwc3 is not otg mode!\n", __func__);
+ return 0;
+ }
+
+ switch (evt) {
+ case DWC3_OTG_EVT_ID_SET:
+ dwc3_otg_stop_host(dwc_otg);
+ dwc3_suspend_device(dwc_otg->dwc);
+ break;
+ case DWC3_OTG_EVT_ID_CLEAR:
+ ret = dwc3_resume_device(dwc_otg->dwc);
+ if (ret) {
+ pr_err("%s: resume device failed!\n", __func__);
+ return ret;
+ }
+ ret = dwc3_otg_start_host(dwc_otg);
+ if (ret) {
+ pr_err("%s: start host failed!\n", __func__);
+ dwc3_suspend_device(dwc_otg->dwc);
+ return ret;
+ }
+ break;
+ case DWC3_OTG_EVT_VBUS_SET:
+ ret = dwc3_resume_device(dwc_otg->dwc);
+ if (ret) {
+ pr_err("%s: resume device failed!\n", __func__);
+ return ret;
+ }
+ ret = dwc3_otg_start_peripheral(dwc_otg);
+ if (ret) {
+ pr_err("%s: start peripheral failed!\n", __func__);
+ dwc3_suspend_device(dwc_otg->dwc);
+ return ret;
+ }
+ break;
+ case DWC3_OTG_EVT_VBUS_CLEAR:
+ ret = dwc3_otg_stop_peripheral(dwc_otg);
+ dwc3_suspend_device(dwc_otg->dwc);
+ break;
+ default:
+ break;
+ }
+ DBG("-\n");
+
+ return ret;
+}
+
+static void dwc3_otg_work_fun(struct work_struct *w)
+{
+ struct dwc3_otg *dwc_otg = container_of(
+ w, struct dwc3_otg, otg_work.work);
+
+ mutex_lock(&dwc_otg->lock);
+ if (dwc3_otg_work(dwc_otg, atomic_read(&dwc_otg->otg_evt_flag)))
+ pr_err("%s: dwc3_otg_work failed\n", __func__);
+ mutex_unlock(&dwc_otg->lock);
+}
+
+int dwc3_otg_init(struct dwc3 *dwc)
+{
+ struct dwc3_otg *dwc_otg;
+ u32 reg;
+
+ DBG("+\n");
+
+ dwc_otg = devm_kzalloc(dwc->dev, sizeof(struct dwc3_otg), GFP_KERNEL);
+ if (!dwc_otg) {
+ dev_err(dwc->dev, "unable to allocate dwc3_otg\n");
+ return -ENOMEM;
+ }
+
+ dwc_otg->dwc = dwc;
+ dwc->dwc_otg = dwc_otg;
+
+ mutex_init(&dwc_otg->lock);
+ INIT_DELAYED_WORK(&dwc_otg->otg_work, dwc3_otg_work_fun);
+
+ dwc_otg_handler = dwc_otg;
+
+#ifdef DWC3_OTG_FORCE_MODE
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ pr_debug("%s: GCTL value 0x%x\n", __func__, reg);
+
+ /* default device mode */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+#else
+ /* disable hnp and srp */
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ reg &= ~(DWC3_OCFG_HNPCAP | DWC3_OCFG_SRPCAP);
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_OSTS);
+ if (reg & DWC3_OSTS_CONIDSTS) {
+ DBG("%s: ID is 1, set peripheral mode\n", __func__);
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PERIMODE;
+ reg &= ~(DWC3_OCTL_HNPREQ | DWC3_OCTL_DEVSETHNPEN |
+ DWC3_OCTL_HSTSETHNPEN);
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+ } else {
+ DBG("%s: ID is 0, clear peripheral mode\n", __func__);
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~DWC3_OCTL_PERIMODE;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+ }
+#endif
+
+ dump_otg_regs(dwc);
+
+ DBG("-\n");
+
+ return 0;
+}
+
+void dwc3_otg_exit(struct dwc3 *dwc)
+{
+ DBG("+\n");
+ dwc_otg_handler = NULL;
+ dwc->dwc_otg->dwc = NULL;
+ dwc->dwc_otg = NULL;
+ DBG("-\n");
+}
diff --git a/drivers/usb/dwc3/dwc3-otg.h b/drivers/usb/dwc3/dwc3-otg.h
new file mode 100644
index 000000000000..b9114b16f050
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-otg.h
@@ -0,0 +1,133 @@
+/*
+ * dwc3-otg.h
+ *
+ * Copyright: (C) 2008-2018 hisilicon.
+ * Contact: wangbinghui<wangbinghui@hisilicon.com>
+ *
+ * USB vbus for Hisilicon device
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ */
+#ifndef __DRIVERS_USB_DWC3_OTG_H
+#define __DRIVERS_USB_DWC3_OTG_H
+
+/* BC Registers */
+#define DWC3_BCFG 0xcc30
+#define DWC3_BCEVT 0xcc38
+#define DWC3_BCEVTEN 0xcc3c
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+/* OTG Configuration Register */
+#define DWC3_OCFG_DISPRTPWRCUTOFF BIT(5)
+#define DWC3_OCFG_OTGHIBDISMASK BIT(4)
+#define DWC3_OCFG_OTGSFTRSTMSK BIT(3)
+#define DWC3_OCFG_HNPCAP BIT(1)
+#define DWC3_OCFG_SRPCAP BIT(0)
+
+/* OTG Control Register */
+#define DWC3_OCTL_OTG3_GOERR BIT(7)
+#define DWC3_OCTL_PERIMODE BIT(6)
+#define DWC3_OCTL_PRTPWRCTL BIT(5)
+#define DWC3_OCTL_HNPREQ BIT(4)
+#define DWC3_OCTL_SESREQ BIT(3)
+#define DWC3_OCTL_TERMSELDLPULSE BIT(2)
+#define DWC3_OCTL_DEVSETHNPEN BIT(1)
+#define DWC3_OCTL_HSTSETHNPEN BIT(0)
+
+/* OTG Events Register */
+#define DWC3_OEVT_DEVICEMOD BIT(31)
+#define DWC3_OEVT_OTGXHCIRUNSTPSETEVNT BIT(27)
+#define DWC3_OEVT_OTGDEVRUNSTPSETEVNT BIT(26)
+#define DWC3_OEVT_OTGHIBENTRYEVNT BIT(25)
+#define DWC3_OEVT_OTGCONIDSTSCHNGEVNT BIT(24)
+#define DWC3_OEVT_HRRCONFNOTIFEVNT BIT(23)
+#define DWC3_OEVT_HRRINITNOTIFEVNT BIT(22)
+#define DWC3_OEVT_OTGADEVIDLEEVNT BIT(21)
+#define DWC3_OEVT_OTGADEVBHOSTENDEVNT BIT(20)
+#define DWC3_OEVT_OTGADEVHOSTEVNT BIT(19)
+#define DWC3_OEVT_OTGADEVHNPCHNGEVNT BIT(18)
+#define DWC3_OEVT_OTGADEVSRPDETEVNT BIT(17)
+#define DWC3_OEVT_OTGADEVSESSENDDETEVNT BIT(16)
+#define DWC3_OEVT_OTGBDEVBHOSTENDEVNT BIT(11)
+#define DWC3_OEVT_OTGBDEVHNPCHNGEVNT BIT(10)
+#define DWC3_OEVT_OTGBDEVSESSVLDDETEVNT BIT(9)
+#define DWC3_OEVT_OTGBDEVVBUSCHNGEVNT BIT(8)
+
+/* OTG Status Register */
+#define DWC3_OSTS_OTGSTATE_MSK (0xf << 8)
+#define DWC3_OSTS_PERIPHERALSTATE BIT(4)
+#define DWC3_OSTS_XHCIPRTPOWER BIT(3)
+#define DWC3_OSTS_BSESVLD BIT(2)
+#define DWC3_OSTS_ASESVLD BIT(1)
+#define DWC3_OSTS_CONIDSTS BIT(0)
+
+struct dwc3_otg {
+ struct usb_otg otg;
+ struct dwc3 *dwc;
+ int otg_irq;
+ struct delayed_work otg_work;
+
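+ /*
+ * Events handled by dwc3_otg_work(): ID_CLEAR starts the host and
+ * ID_SET stops it; VBUS_SET starts the peripheral and VBUS_CLEAR
+ * stops it.
+ */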
+ atomic_t otg_evt_flag;
+#define DWC3_OTG_EVT_ID_SET 1
+#define DWC3_OTG_EVT_ID_CLEAR 2
+#define DWC3_OTG_EVT_VBUS_SET 3
+#define DWC3_OTG_EVT_VBUS_CLEAR 4
+
+ struct mutex lock;
+};
+
+#ifdef CONFIG_USB_DWC3_OTG
+extern struct dwc3_otg *dwc_otg_handler;
+int dwc3_otg_init(struct dwc3 *dwc);
+void dwc3_otg_exit(struct dwc3 *dwc);
+int dwc3_otg_work(struct dwc3_otg *dwc_otg, int evt);
+int dwc3_otg_resume(struct dwc3 *dwc);
+int dwc3_otg_suspend(struct dwc3 *dwc);
+int dwc3_otg_id_value(struct dwc3_otg *dwc_otg);
+#else
+#define dwc_otg_handler ((struct dwc3_otg *)NULL)
+static inline int dwc3_otg_init(struct dwc3 *dwc)
+{
+ return 0;
+}
+
+static inline void dwc3_otg_exit(struct dwc3 *dwc)
+{
+}
+
+static inline int dwc3_otg_work(struct dwc3_otg *dwc_otg, int evt)
+{
+ return 0;
+}
+
+static inline int dwc3_otg_resume(struct dwc3 *dwc)
+{
+ return 0;
+}
+
+static inline int dwc3_otg_suspend(struct dwc3 *dwc)
+{
+ return 0;
+}
+
+static inline int dwc3_otg_id_value(struct dwc3_otg *dwc_otg)
+{
+ return 0;
+}
+#endif
+
+#endif /* __DRIVERS_USB_DWC3_OTG_H */
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 427291a19e6d..e1b45eb2ca5d 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,29 @@
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
+#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
+#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+
+#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
+#define PCI_INTEL_BXT_STATE_D0 0
+#define PCI_INTEL_BXT_STATE_D3 3
+
+/**
+ * struct dwc3_pci - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ * @uuid: _DSM UUID
+ * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ */
+struct dwc3_pci {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+
+ u8 uuid[16];
+
+ unsigned int has_dsm_for_pm:1;
+};
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -49,8 +72,11 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ },
};
-static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
+static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
+ struct platform_device *dwc3 = dwc->dwc3;
+ struct pci_dev *pdev = dwc->pci;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
struct property_entry properties[] = {
@@ -72,6 +98,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -83,6 +110,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
struct property_entry properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ }
};
@@ -90,6 +118,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
if (ret < 0)
return ret;
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
+ acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+ dwc->has_dsm_for_pm = true;
+ }
+
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
@@ -128,6 +162,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -140,8 +175,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
static int dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
+ struct dwc3_pci *dwc;
struct resource res[2];
- struct platform_device *dwc3;
int ret;
struct device *dev = &pci->dev;
@@ -153,11 +188,13 @@ static int dwc3_pci_probe(struct pci_dev *pci,
pci_set_master(pci);
- dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!dwc3) {
- dev_err(dev, "couldn't allocate dwc3 device\n");
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
return -ENOMEM;
- }
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
@@ -170,20 +207,21 @@ static int dwc3_pci_probe(struct pci_dev *pci,
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
- ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
return ret;
}
- dwc3->dev.parent = dev;
- ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev));
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+ ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
- ret = dwc3_pci_quirks(pci, dwc3);
+ ret = dwc3_pci_quirks(dwc);
if (ret)
goto err;
- ret = platform_device_add(dwc3);
+ ret = platform_device_add(dwc->dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
goto err;
@@ -191,21 +229,23 @@ static int dwc3_pci_probe(struct pci_dev *pci,
device_init_wakeup(dev, true);
device_set_run_wake(dev, true);
- pci_set_drvdata(pci, dwc3);
+ pci_set_drvdata(pci, dwc);
pm_runtime_put(dev);
return 0;
err:
- platform_device_put(dwc3);
+ platform_device_put(dwc->dwc3);
return ret;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
+ struct dwc3_pci *dwc = pci_get_drvdata(pci);
+
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
- platform_device_unregister(pci_get_drvdata(pci));
+ platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_pci_id_table[] = {
@@ -231,45 +271,82 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
+{
+ union acpi_object *obj;
+ union acpi_object tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ if (!dwc->has_dsm_for_pm)
+ return 0;
+
+ tmp.type = ACPI_TYPE_INTEGER;
+ tmp.integer.value = param;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+ 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
+ if (!obj) {
+ dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
+
#ifdef CONFIG_PM
static int dwc3_pci_runtime_suspend(struct device *dev)
{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
if (device_run_wake(dev))
- return 0;
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
return -EBUSY;
}
static int dwc3_pci_runtime_resume(struct device *dev)
{
- struct platform_device *dwc3 = dev_get_drvdata(dev);
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+ if (ret)
+ return ret;
return pm_runtime_get(&dwc3->dev);
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
-static int dwc3_pci_pm_dummy(struct device *dev)
+static int dwc3_pci_suspend(struct device *dev)
{
- /*
- * There's nothing to do here. No, seriously. Everything is either taken
- * care either by PCI subsystem or dwc3/core.c, so we have nothing
- * missing here.
- *
- * So you'd think we didn't need this at all, but PCI subsystem will
- * bail out if we don't have a valid callback :-s
- */
- return 0;
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+}
+
+static int dwc3_pci_resume(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
}
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
NULL)
};
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 4cf5381ff991..505676fd3ba4 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -219,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- dma_set_coherent_mask(dev, dev->coherent_dma_mask);
dwc3_data->dev = dev;
dwc3_data->regmap = regmap;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2331469f943d..e94e9d06e6a7 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,30 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
-{
- switch (state) {
- case EP0_UNCONNECTED:
- return "Unconnected";
- case EP0_SETUP_PHASE:
- return "Setup Phase";
- case EP0_DATA_PHASE:
- return "Data Phase";
- case EP0_STATUS_PHASE:
- return "Status Phase";
- default:
- return "UNKNOWN";
- }
-}
-
-static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
+static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
struct dwc3_trb *trb;
- struct dwc3_ep *dep;
-
- dep = dwc->eps[epnum];
+ struct dwc3 *dwc;
+ dwc = dep->dwc;
trb = &dwc->ep0_trb[dep->trb_enqueue];
if (chain)
@@ -85,28 +68,24 @@ static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
trace_dwc3_prepare_trb(dep, trb);
}
-static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
+static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3_ep *dep;
+ struct dwc3 *dwc;
int ret;
- dep = dwc->eps[epnum];
- if (dep->flags & DWC3_EP_BUSY) {
- dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name);
+ if (dep->flags & DWC3_EP_BUSY)
return 0;
- }
+
+ dwc = dep->dwc;
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
- if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
- dep->name);
+ if (ret < 0)
return ret;
- }
dep->flags |= DWC3_EP_BUSY;
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
@@ -119,11 +98,19 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
+ int ret;
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->epnum = dep->number;
+ /* we share one TRB for ep0/1 */
+ if (!list_empty(&dep->pending_list)) {
+ dev_WARN(dwc->dev, "ep0 busy!\n");
+ ret = -EBUSY;
+ return ret;
+ }
+
list_add_tail(&req->list, &dep->pending_list);
/*
@@ -166,9 +153,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
if (dwc->ep0state == EP0_STATUS_PHASE)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
- else
- dwc3_trace(trace_dwc3_ep0,
- "too early for delayed status");
return 0;
}
@@ -214,8 +198,18 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
dep->flags &= ~DWC3_EP0_DIR_IN;
+
+ return 0;
}
+ /* mark the status phase already queued */
+ if (dwc->ep0_next_event == DWC3_EP0_NRDY_STATUS)
+ dwc->status_queued = true;
+
+ if (req->request.length != 0)
+ dev_WARN(dwc->dev, "status phase len %d\n",
+ req->request.length);
+
return 0;
}
@@ -232,24 +226,12 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to queue request %p to disabled %s",
- request, dep->name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
ret = -ESHUTDOWN;
goto out;
}
- /* we share one TRB for ep0/1 */
- if (!list_empty(&dep->pending_list)) {
- ret = -EBUSY;
- goto out;
- }
-
- dwc3_trace(trace_dwc3_ep0,
- "queueing request %p to %s length %d state '%s'",
- request, dep->name, request->length,
- dwc3_ep0_state_string(dwc->ep0state));
-
ret = __dwc3_gadget_ep0_queue(dep, req);
out:
@@ -271,6 +253,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
__dwc3_gadget_ep_set_halt(dep, 1, false);
dep->flags = DWC3_EP_ENABLED;
dwc->delayed_status = false;
+ dwc->status_queued = false;
if (!list_empty(&dep->pending_list)) {
struct dwc3_request *req;
@@ -309,11 +292,15 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
+ struct dwc3_ep *dep;
int ret;
- dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
+ complete(&dwc->ep0_in_setup);
+
+ dep = dwc->eps[0];
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
- ret = dwc3_ep0_start_trans(dwc, 0);
+ ret = dwc3_ep0_start_trans(dep);
WARN_ON(ret < 0);
}
@@ -349,6 +336,12 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
u16 usb_status = 0;
__le16 *response_pkt;
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if (!le16_to_cpu(ctrl->wLength))
+ return -EINVAL;
+
recip = ctrl->bRequestType & USB_RECIP_MASK;
switch (recip) {
case USB_RECIP_DEVICE:
@@ -399,126 +392,198 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
-static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU1ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU2ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
+ u32 wIndex, int set)
+{
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ switch (wIndex >> 8) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ dwc->test_mode_nr = wIndex >> 8;
+ dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_device(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
- struct dwc3_ep *dep;
- u32 recip;
+ enum usb_device_state state;
u32 wValue;
u32 wIndex;
- u32 reg;
- int ret;
- enum usb_device_state state;
+ int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
- recip = ctrl->bRequestType & USB_RECIP_MASK;
state = dwc->gadget.state;
- switch (recip) {
- case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ /*
+ * 9.4.1 says only for SS, in AddressState only for
+ * default control pipe
+ */
+ case USB_DEVICE_U1_ENABLE:
+ ret = dwc3_ep0_handle_u1(dwc, state, set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ ret = dwc3_ep0_handle_u2(dwc, state, set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ ret = -EINVAL;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- switch (wValue) {
- case USB_DEVICE_REMOTE_WAKEUP:
- break;
+ return ret;
+}
+
+static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
/*
- * 9.4.1 says only only for SS, in AddressState only for
- * default control pipe
+ * REVISIT: Ideally we would enable some low power mode here,
+ * however it's unclear what we should be doing here.
+ *
+ * For now, we're not doing anything, just making sure we return
+ * 0 so USB Command Verifier tests pass without any errors.
*/
- case USB_DEVICE_U1_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ return ret;
+}
- case USB_DEVICE_U2_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ struct dwc3_ep *dep;
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret;
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
- case USB_DEVICE_LTM_ENABLE:
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
return -EINVAL;
- case USB_DEVICE_TEST_MODE:
- if ((wIndex & 0xff) != 0)
- return -EINVAL;
- if (!set)
- return -EINVAL;
-
- switch (wIndex >> 8) {
- case TEST_J:
- case TEST_K:
- case TEST_SE0_NAK:
- case TEST_PACKET:
- case TEST_FORCE_EN:
- dwc->test_mode_nr = wIndex >> 8;
- dwc->test_mode = true;
- break;
- default:
- return -EINVAL;
- }
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
break;
- default:
+
+ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
return -EINVAL;
- }
break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ u32 recip;
+ int ret;
+ enum usb_device_state state;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ state = dwc->gadget.state;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ ret = dwc3_ep0_handle_device(dwc, ctrl, set);
+ break;
case USB_RECIP_INTERFACE:
- switch (wValue) {
- case USB_INTRF_FUNC_SUSPEND:
- if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
- /* XXX enable Low power suspend */
- ;
- if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
- /* XXX enable remote wakeup */
- ;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
break;
-
case USB_RECIP_ENDPOINT:
- switch (wValue) {
- case USB_ENDPOINT_HALT:
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
- if (!dep)
- return -EINVAL;
- if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
- break;
- ret = __dwc3_gadget_ep_set_halt(dep, set, true);
- if (ret)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
break;
-
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -529,13 +594,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
addr = le16_to_cpu(ctrl->wValue);
if (addr > 127) {
- dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr);
+ dev_err(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
}
if (state == USB_STATE_CONFIGURED) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to set address when configured");
+ dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
return -EINVAL;
}
@@ -663,6 +727,12 @@ static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
u16 wLength;
u16 wValue;
+ if (unlikely(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if (unlikely(!le16_to_cpu(ctrl->wLength)))
+ return -EINVAL;
+
if (state == USB_STATE_DEFAULT)
return -EINVAL;
@@ -720,35 +790,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS");
ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
break;
case USB_REQ_SET_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
break;
case USB_REQ_SET_ADDRESS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS");
ret = dwc3_ep0_set_address(dwc, ctrl);
break;
case USB_REQ_SET_CONFIGURATION:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION");
ret = dwc3_ep0_set_config(dwc, ctrl);
break;
case USB_REQ_SET_SEL:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL");
ret = dwc3_ep0_set_sel(dwc, ctrl);
break;
case USB_REQ_SET_ISOCH_DELAY:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
default:
- dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
break;
}
@@ -759,7 +821,7 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
+ struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
int ret = -EINVAL;
u32 len;
@@ -787,9 +849,25 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
if (ret == USB_GADGET_DELAYED_STATUS)
dwc->delayed_status = true;
+ if (dwc->status_queued) {
+ dwc->status_queued = false;
+ if (dwc->delayed_status) {
+ pr_info("delayed status already come, will not wait for it.\n");
+ dwc->delayed_status = false;
+ usb_gadget_set_state(&dwc->gadget,
+ USB_STATE_CONFIGURED);
+ }
+ }
+
out:
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dwc->dev, "ep0 setup error, ret %d!\n", ret);
+ dev_err(dwc->dev, "ctrl: %02x %02x %04x %04x %04x\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ ctrl->wValue, ctrl->wIndex, ctrl->wLength);
dwc3_ep0_stall_and_restart(dwc);
+ }
+
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
@@ -799,7 +877,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct usb_request *ur;
struct dwc3_trb *trb;
struct dwc3_ep *ep0;
- unsigned transfer_size = 0;
unsigned maxp;
unsigned remaining_ur_length;
void *buf;
@@ -812,21 +889,18 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
ep0 = dwc->eps[0];
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
-
trb = dwc->ep0_trb;
-
trace_dwc3_complete_trb(ep0, trb);
r = next_request(&ep0->pending_list);
- if (!r)
+ if (!r) {
+		dev_err(dwc->dev, "ep0 request list empty while completing data\n");
return;
+ }
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (status == DWC3_TRBSTS_SETUP_PENDING) {
dwc->setup_packet_pending = true;
-
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
-
if (r)
dwc3_gadget_giveback(ep0, r, -ECONNRESET);
@@ -838,58 +912,23 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
remaining_ur_length = ur->length;
length = trb->size & DWC3_TRB_SIZE_MASK;
-
maxp = ep0->endpoint.maxpacket;
-
- if (dwc->ep0_bounced) {
- /*
- * Handle the first TRB before handling the bounce buffer if
- * the request length is greater than the bounce buffer size
- */
- if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
- transfer_size = ALIGN(ur->length - maxp, maxp);
- transferred = transfer_size - length;
- buf = (u8 *)buf + transferred;
- ur->actual += transferred;
- remaining_ur_length -= transferred;
-
- trb++;
- length = trb->size & DWC3_TRB_SIZE_MASK;
-
- ep0->trb_enqueue = 0;
- }
-
- transfer_size = roundup((ur->length - transfer_size),
- maxp);
-
- transferred = min_t(u32, remaining_ur_length,
- transfer_size - length);
- memcpy(buf, dwc->ep0_bounce, transferred);
- } else {
- transferred = ur->length - length;
- }
-
+ transferred = ur->length - length;
ur->actual += transferred;
- if ((epnum & 1) && ur->actual < ur->length) {
- /* for some reason we did not get everything out */
+ if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
+ ur->length && ur->zero) || dwc->ep0_bounced) {
+ trb++;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ trace_dwc3_complete_trb(ep0, trb);
+ ep0->trb_enqueue = 0;
+ dwc->ep0_bounced = false;
+ }
+ if ((epnum & 1) && ur->actual < ur->length)
dwc3_ep0_stall_and_restart(dwc);
- } else {
+ else
dwc3_gadget_giveback(ep0, r, 0);
-
- if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
- ur->length && ur->zero) {
- int ret;
-
- dwc->ep0_next_event = DWC3_EP0_COMPLETE;
-
- dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
- 0, DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, epnum);
- WARN_ON(ret < 0);
- }
- }
}
static void dwc3_ep0_complete_status(struct dwc3 *dwc,
@@ -916,7 +955,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
+ dev_err(dwc->dev, "invalid test #%d\n",
dwc->test_mode_nr);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -924,10 +963,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
}
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
- if (status == DWC3_TRBSTS_SETUP_PENDING) {
+ if (status == DWC3_TRBSTS_SETUP_PENDING)
dwc->setup_packet_pending = true;
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
- }
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
@@ -944,17 +981,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Setup Phase");
dwc3_ep0_inspect_setup(dwc, event);
break;
case EP0_DATA_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Data Phase");
dwc3_ep0_complete_data(dwc, event);
break;
case EP0_STATUS_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Status Phase");
dwc3_ep0_complete_status(dwc, event);
break;
default:
@@ -970,55 +1004,69 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->direction = !!dep->number;
if (req->request.length == 0) {
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ctrl_req_addr, 0,
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ ret = dwc3_ep0_start_trans(dep);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
&& (dep->number == 0)) {
- u32 transfer_size = 0;
u32 maxpacket;
+ u32 rem;
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
maxpacket = dep->endpoint.maxpacket;
+ rem = req->request.length % maxpacket;
+ dwc->ep0_bounced = true;
- if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
- transfer_size = ALIGN(req->request.length - maxpacket,
- maxpacket);
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- req->request.dma,
- transfer_size,
- DWC3_TRBCTL_CONTROL_DATA,
- true);
- }
-
- transfer_size = roundup((req->request.length - transfer_size),
- maxpacket);
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ maxpacket - rem,
+ DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
+ } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
+ req->request.length && req->request.zero) {
+ u32 maxpacket;
+ u32 rem;
- dwc->ep0_bounced = true;
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
+ return;
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ep0_bounce_addr, transfer_size,
- DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ maxpacket = dep->endpoint.maxpacket;
+ rem = req->request.length % maxpacket;
+
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ 0, DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
} else {
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
- dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ ret = dwc3_ep0_start_trans(dep);
}
WARN_ON(ret < 0);
@@ -1032,9 +1080,8 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ctrl_req_addr, 0, type, false);
- return dwc3_ep0_start_trans(dwc, dep->number);
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
+ return dwc3_ep0_start_trans(dep);
}
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
@@ -1073,8 +1120,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
{
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
- dwc3_trace(trace_dwc3_ep0, "Control Data");
-
/*
* We already have a DATA transfer in the controller's cache,
* if we receive a XferNotReady(DATA) we will ignore it, unless
@@ -1087,8 +1132,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_expect_in != event->endpoint_number) {
struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];
- dwc3_trace(trace_dwc3_ep0,
- "Wrong direction for Data phase");
+ dev_err(dwc->dev, "unexpected direction for Data Phase\n");
dwc3_ep0_end_control_data(dwc, dep);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -1100,16 +1144,29 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
return;
- dwc3_trace(trace_dwc3_ep0, "Control Status");
-
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
+ struct dwc3_ep *dep = dwc->eps[0];
+
WARN_ON_ONCE(event->endpoint_number != 1);
- dwc3_trace(trace_dwc3_ep0, "Delayed Status");
+ /*
+			 * We should handle the delayed STATUS phase here if the
+			 * request for handling the delayed STATUS has been
+			 * queued into the list.
+ */
+ if (!list_empty(&dep->pending_list)) {
+ dwc->delayed_status = false;
+ usb_gadget_set_state(&dwc->gadget,
+ USB_STATE_CONFIGURED);
+ dwc3_ep0_do_control_status(dwc, event);
+ }
+
return;
}
+ dwc->status_queued = false;
+
dwc3_ep0_do_control_status(dwc, event);
}
}
@@ -1117,10 +1174,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
- dwc3_ep_event_string(event),
- dwc3_ep0_state_string(dwc->ep0state));
-
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_ep0_xfer_complete(dwc, event);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 16c67120d72b..2c8f8f762756 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -34,6 +34,7 @@
#include "core.h"
#include "gadget.h"
#include "io.h"
+#include "dwc3-hisi.h"
/**
* dwc3_gadget_set_test_mode - Enables USB2 Test Modes
@@ -139,9 +140,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
udelay(5);
}
- dwc3_trace(trace_dwc3_gadget,
- "link state change request timed out");
-
return -ETIMEDOUT;
}
@@ -174,28 +172,17 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
- unsigned int unmap_after_complete = false;
req->started = false;
list_del(&req->list);
req->trb = NULL;
+ req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- /*
- * NOTICE we don't want to unmap before calling ->complete() if we're
- * dealing with a bounced ep0 request. If we unmap it here, we would end
- * up overwritting the contents of req->buf and this could confuse the
- * gadget driver.
- */
- if (dwc->ep0_bounced && dep->number <= 1) {
- dwc->ep0_bounced = false;
- unmap_after_complete = true;
- } else {
- usb_gadget_unmap_request(&dwc->gadget,
- &req->request, req->direction);
- }
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -203,10 +190,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
- if (unmap_after_complete)
- usb_gadget_unmap_request(&dwc->gadget,
- &req->request, req->direction);
-
if (dep->number > 1)
pm_runtime_put(dwc->dev);
}
@@ -229,7 +212,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
ret = -EINVAL;
break;
}
- } while (timeout--);
+ } while (--timeout);
if (!timeout) {
ret = -ETIMEDOUT;
@@ -246,8 +229,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 500;
+ u32 timeout = 3000;
u32 reg;
int cmd_status = 0;
@@ -271,7 +255,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int needs_wakeup;
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
@@ -289,7 +273,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
- dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
+ /*
+	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 that if we're
+ * not relying on XferNotReady, we can make use of a special "No
+ * Response Update Transfer" command where we should clear both CmdAct
+ * and CmdIOC bits.
+ *
+ * With this, we don't need to wait for command completion and can
+ * straight away issue further commands to the endpoint.
+ *
+ * NOTICE: We're making an assumption that control endpoints will never
+ * make use of Update Transfer command. This is a safe assumption
+ * because we can never have more than one request at a time with
+ * Control Endpoints. If anybody changes that assumption, this chunk
+ * needs to be updated accordingly.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
+ !usb_endpoint_xfer_isoc(desc))
+ cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
+ else
+ cmd |= DWC3_DEPCMD_CMDACT;
+
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
@@ -331,6 +336,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
+ if (ret == 0) {
+ switch (DWC3_DEPCMD_CMD(cmd)) {
+ case DWC3_DEPCMD_STARTTRANSFER:
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+ break;
+ case DWC3_DEPCMD_ENDTRANSFER:
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -378,7 +397,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
if (dep->trb_pool)
return 0;
- dep->trb_pool = dma_alloc_coherent(dwc->dev,
+ dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
@@ -394,7 +413,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
- dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
@@ -467,16 +486,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
struct dwc3_gadget_ep_cmd_params params;
if (dev_WARN_ONCE(dwc->dev, modify && restore,
"Can't modify and restore\n"))
return -EINVAL;
+ comp_desc = dep->endpoint.comp_desc;
+ desc = dep->endpoint.desc;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -555,24 +577,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
+
u32 reg;
int ret;
- dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
-
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
if (ret)
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
- restore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
if (ret)
return ret;
@@ -580,17 +599,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ init_waitqueue_head(&dep->wait_end_transfer);
+
if (usb_endpoint_xfer_control(desc))
- return 0;
+ goto out;
/* Initialize the TRB ring */
dep->trb_dequeue = 0;
@@ -608,6 +628,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+ /*
+ * Issue StartTransfer here with no-op TRB so we can always rely on No
+ * Response Update Transfer command.
+ */
+ if (usb_endpoint_xfer_bulk(desc)) {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ dma_addr_t trb_dma;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ trb = &dep->trb_pool[0];
+ trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ params.param0 = upper_32_bits(trb_dma);
+ params.param1 = lower_32_bits(trb_dma);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0)
+ return ret;
+
+ dep->flags |= DWC3_EP_BUSY;
+
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+ WARN_ON_ONCE(!dep->resource_index);
+ }
+
+
+out:
+ trace_dwc3_gadget_ep_enable(dep);
+
return 0;
}
@@ -645,7 +698,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
struct dwc3 *dwc = dep->dwc;
u32 reg;
- dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+ trace_dwc3_gadget_ep_disable(dep);
dwc3_remove_requests(dwc, dep);
@@ -658,10 +711,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
dep->stream_capable = false;
- dep->endpoint.desc = NULL;
- dep->comp_desc = NULL;
dep->type = 0;
- dep->flags = 0;
+ dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
+
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+ dep->endpoint.desc = NULL;
+ }
return 0;
}
@@ -708,7 +765,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
return 0;
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -774,34 +831,14 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
-/**
- * dwc3_prepare_one_trb - setup one TRB from one request
- * @dep: endpoint for which this request is prepared
- * @req: dwc3_request pointer
- */
-static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, dma_addr_t dma,
- unsigned length, unsigned chain, unsigned node)
+static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
+ dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
+ unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
- struct dwc3_trb *trb;
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = &dwc->gadget;
enum usb_device_speed speed = gadget->speed;
- dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
- dep->name, req, (unsigned long long) dma,
- length, chain ? " chain" : "");
-
- trb = &dep->trb_pool[dep->trb_enqueue];
-
- if (!req->trb) {
- dwc3_gadget_move_started_request(req);
- req->trb = trb;
- req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- req->first_trb_index = dep->trb_enqueue;
- dep->queued_requests++;
- }
-
dwc3_ep_inc_enq(dep);
trb->size = DWC3_TRB_SIZE_LENGTH(length);
@@ -871,21 +908,27 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
* This is only possible with faulty memory because we
* checked it already :)
*/
- BUG();
+ dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
+ usb_endpoint_type(dep->endpoint.desc));
}
/* always enable Continue on Short Packet */
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
+ if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if (short_not_ok)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ }
- if ((!req->request.no_interrupt && !chain) ||
+ if ((!no_interrupt && !chain) ||
(dwc3_calc_trbs_left(dep) == 0))
- trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
- trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
+ trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
trb->ctrl |= DWC3_TRB_CTRL_HWO;
@@ -893,6 +936,36 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
}
/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ * @chain: should this TRB be chained to the next?
+ * @node: only for isochronous endpoints. First TRB needs different type.
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned chain, unsigned node)
+{
+ struct dwc3_trb *trb;
+ unsigned length = req->request.length;
+ unsigned stream_id = req->request.stream_id;
+ unsigned short_not_ok = req->request.short_not_ok;
+ unsigned no_interrupt = req->request.no_interrupt;
+ dma_addr_t dma = req->request.dma;
+
+ trb = &dep->trb_pool[dep->trb_enqueue];
+
+ if (!req->trb) {
+ dwc3_gadget_move_started_request(req);
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ dep->queued_requests++;
+ }
+
+ __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+ stream_id, short_not_ok, no_interrupt);
+}
+
+/**
* dwc3_ep_prev_trb() - Returns the previous TRB in the ring
* @dep: The endpoint with the TRB ring
* @index: The index of the current TRB in the ring
@@ -914,6 +987,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
+ struct dwc3 *dwc = dep->dwc;
u8 trbs_left;
/*
@@ -925,7 +999,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
- if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+ if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
+ "%s No TRBS left\n", dep->name))
return 0;
return DWC3_TRB_NUM - 1;
@@ -945,21 +1020,36 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
{
struct scatterlist *sg = req->sg;
struct scatterlist *s;
- unsigned int length;
- dma_addr_t dma;
int i;
for_each_sg(sg, s, req->num_pending_sgs, i) {
+ unsigned int length = req->request.length;
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = length % maxp;
unsigned chain = true;
- length = sg_dma_len(s);
- dma = sg_dma_address(s);
-
if (sg_is_last(s))
chain = false;
- dwc3_prepare_one_trb(dep, req, dma, length,
- chain, i);
+ if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->unaligned = true;
+
+ /* prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, true, i);
+
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
+ maxp - rem, false, 0,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else {
+ dwc3_prepare_one_trb(dep, req, chain, i);
+ }
if (!dwc3_calc_trbs_left(dep))
break;
@@ -969,14 +1059,44 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
struct dwc3_request *req)
{
- unsigned int length;
- dma_addr_t dma;
-
- dma = req->request.dma;
- length = req->request.length;
-
- dwc3_prepare_one_trb(dep, req, dma, length,
- false, 0);
+ unsigned int length = req->request.length;
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = length % maxp;
+
+ if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->unaligned = true;
+
+ /* prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, true, 0);
+
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
+ false, 0, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else if (req->request.zero && req->request.length &&
+		   (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket))) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->zero = true;
+
+ /* prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, true, 0);
+
+ /* Now prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+ false, 0, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else {
+ dwc3_prepare_one_trb(dep, req, false, 0);
+ }
}
/*
@@ -996,6 +1116,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+ /*
+ * We can get in a situation where there's a request in the started list
+ * but there weren't enough TRBs to fully kick it in the first time
+ * around, so it has been waiting for more TRBs to be freed up.
+ *
+ * In that case, we should check if we have a request with pending_sgs
+ * in the started list and prepare TRBs for that request first,
+ * otherwise we will prepare TRBs completely out of order and that will
+ * break things.
+ */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->num_pending_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req);
+
+ if (!dwc3_calc_trbs_left(dep))
+ return;
+ }
+
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
if (req->num_pending_sgs > 0)
dwc3_prepare_one_trb_sg(dep, req);
@@ -1011,7 +1149,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
- struct dwc3 *dwc = dep->dwc;
int starting;
int ret;
u32 cmd;
@@ -1044,9 +1181,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
* here and stop, unmap, free and del each of the linked
* requests instead of what we do now.
*/
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
- list_del(&req->list);
+ if (req->trb)
+ memset(req->trb, 0, sizeof(struct dwc3_trb));
+ dep->queued_requests--;
+ dwc3_gadget_giveback(dep, req, ret);
return ret;
}
@@ -1060,21 +1198,31 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
return 0;
}
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
u32 uf;
if (list_empty(&dep->pending_list)) {
- dwc3_trace(trace_dwc3_gadget,
- "ISOC ep %s run out for requests",
+ dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
dep->flags |= DWC3_EP_PENDING_REQUEST;
return;
}
- /* 4 micro frames in the future */
- uf = cur_uf + dep->interval * 4;
+ /*
+ * Schedule the first trb for one interval in the future or at
+ * least 4 microframes.
+ */
+ uf = cur_uf + max_t(u32, 4, dep->interval);
__dwc3_gadget_kick_transfer(dep, uf);
}
@@ -1096,16 +1244,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
int ret;
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_gadget,
- "trying to queue request %p to disabled %s",
- &req->request, dep->endpoint.name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %pK belongs to '%s'",
- &req->request, req->dep->name);
+ dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
+ dep->name, &req->request, req->dep->name);
return -EINVAL;
}
@@ -1118,8 +1265,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
trace_dwc3_ep_queue(req);
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->direction);
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+ dep->direction);
if (ret)
return ret;
@@ -1137,54 +1284,41 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* errors which will force us issue EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
- list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ dep->flags = DWC3_EP_ENABLED;
+ } else {
+ u32 cur_uf;
+
+ cur_uf = __dwc3_gadget_get_frame(dwc);
+ __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ }
+ return 0;
}
- return 0;
+
+ if ((dep->flags & DWC3_EP_BUSY) &&
+ !(dep->flags & DWC3_EP_MISSED_ISOC)) {
+ WARN_ON_ONCE(!dep->resource_index);
+ ret = __dwc3_gadget_kick_transfer(dep,
+ dep->resource_index);
+ }
+
+ goto out;
}
if (!dwc3_calc_trbs_left(dep))
return 0;
ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (ret && ret != -EBUSY)
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
+out:
if (ret == -EBUSY)
ret = 0;
return ret;
}
-static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
- struct usb_request *request)
-{
- dwc3_gadget_ep_free_request(ep, request);
-}
-
-static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
-{
- struct dwc3_request *req;
- struct usb_request *request;
- struct usb_ep *ep = &dep->endpoint;
-
- dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
- request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
- if (!request)
- return -ENOMEM;
-
- request->length = 0;
- request->buf = dwc->zlp_buf;
- request->complete = __dwc3_gadget_ep_zlp_complete;
-
- req = to_dwc3_request(request);
-
- return __dwc3_gadget_ep_queue(dep, req);
-}
-
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags)
{
@@ -1198,17 +1332,6 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_queue(dep, req);
-
- /*
- * Okay, here's the thing, if gadget driver has requested for a ZLP by
- * setting request->zero, instead of doing magic, we will just queue an
- * extra usb_request ourselves so that it gets handled the same way as
- * any other request.
- */
- if (ret == 0 && request->zero && request->length &&
- (request->length % ep->maxpacket == 0))
- ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1243,6 +1366,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number, true);
+
+ /*
+ * If request was already started, this means we had to
+ * stop the transfer. With that we also need to ignore
+ * all TRBs used by the request, however TRBs can only
+ * be modified after completion of END_TRANSFER
+ * command. So what we do here is that we wait for
+ * END_TRANSFER completion and only after that, we jump
+ * over TRBs by clearing HWO and incrementing dequeue
+ * pointer.
+ *
+ * Note that we have 2 possible types of transfers here:
+ *
+ * i) Linear buffer request
+ * ii) SG-list based request
+ *
+ * SG-list based requests will have r->num_pending_sgs
+			 * set to a valid number (> 0). Linear requests
+			 * normally use a single TRB.
+ *
+ * For each of these two cases, if r->unaligned flag is
+ * set, one extra TRB has been used to align transfer
+ * size to wMaxPacketSize.
+ *
+ * All of these cases need to be taken into
+ * consideration so we don't mess up our TRB ring
+ * pointers.
+ */
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+
+ if (!r->trb)
+ goto out1;
+
+ if (r->num_pending_sgs) {
+ struct dwc3_trb *trb;
+ int i = 0;
+
+ for (i = 0; i < r->num_pending_sgs; i++) {
+ trb = r->trb + i;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+
+ if (r->unaligned || r->zero) {
+ trb = r->trb + r->num_pending_sgs + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ } else {
+ struct dwc3_trb *trb = r->trb;
+
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+
+ if (r->unaligned || r->zero) {
+ trb = r->trb + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ }
goto out1;
}
dev_err(dwc->dev, "request %pK was not queued to %s\n",
@@ -1253,6 +1438,10 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
out1:
/* giveback the request */
+ if (!dep->queued_requests)
+ goto out0;
+
+ dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@@ -1293,9 +1482,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (!protocol && ((dep->direction && transfer_in_flight) ||
(!dep->direction && started))) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: pending request, cannot halt",
- dep->name);
return -EAGAIN;
}
@@ -1391,10 +1577,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = {
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- return DWC3_DSTS_SOFFN(reg);
+ return __dwc3_gadget_get_frame(dwc);
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
@@ -1417,10 +1601,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
- dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+ (speed == DWC3_DSTS_SUPERSPEED_PLUS))
return 0;
- }
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1429,9 +1611,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
default:
- dwc3_trace(trace_dwc3_gadget,
- "can't wakeup from '%s'",
- dwc3_gadget_link_string(link_state));
return -EINVAL;
}
@@ -1536,11 +1715,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (!timeout)
return -ETIMEDOUT;
- dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
- dwc->gadget_driver
- ? dwc->gadget_driver->function : "no-function",
- is_on ? "connect" : "disconnect");
-
return 0;
}
@@ -1552,6 +1726,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
+ /*
+ * Per databook, when we want to stop the gadget, if a control transfer
+ * is still in process, complete it and get the core into setup phase.
+ */
+ if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+ reinit_completion(&dwc->ep0_in_setup);
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ if (ret == 0) {
+ dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+ return -ETIMEDOUT;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1569,11 +1758,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
- DWC3_DEVTEN_ULSTCNGEN |
DWC3_DEVTEN_CONNECTDONEEN |
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ if (dwc->revision < DWC3_REVISION_250A)
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -1633,6 +1824,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
int ret = 0;
u32 reg;
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1693,16 +1895,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1768,9 +1968,6 @@ err0:
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
- if (pm_runtime_suspended(dwc->dev))
- return;
-
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1780,9 +1977,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ int epnum;
spin_lock_irqsave(&dwc->lock, flags);
+
+ if (pm_runtime_suspended(dwc->dev))
+ goto out;
+
__dwc3_gadget_stop(dwc);
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ continue;
+
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+ }
+
+out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1802,14 +2020,15 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
/* -------------------------------------------------------------------------- */
-static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
- u8 num, u32 direction)
+static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
{
struct dwc3_ep *dep;
- u8 i;
+ u8 epnum;
- for (i = 0; i < num; i++) {
- u8 epnum = (i << 1) | (direction ? 1 : 0);
+ INIT_LIST_HEAD(&dwc->gadget.ep_list);
+
+ for (epnum = 0; epnum < num; epnum++) {
+ bool direction = epnum & 1;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
@@ -1817,17 +2036,21 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->dwc = dwc;
dep->number = epnum;
- dep->direction = !!direction;
+ dep->direction = direction;
dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
- (epnum & 1) ? "in" : "out");
+ direction ? "in" : "out");
dep->endpoint.name = dep->name;
- spin_lock_init(&dep->lock);
- dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
+ if (!(dep->number > 1)) {
+ dep->endpoint.desc = &dwc3_gadget_ep0_desc;
+ dep->endpoint.comp_desc = NULL;
+ }
+
+ spin_lock_init(&dep->lock);
if (epnum == 0 || epnum == 1) {
usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
@@ -1835,6 +2058,44 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->endpoint.ops = &dwc3_gadget_ep0_ops;
if (!epnum)
dwc->gadget.ep0 = &dep->endpoint;
+ } else if (direction) {
+ int mdwidth;
+ int size;
+ int ret;
+ int num;
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ /* MDWIDTH is represented in bits, we need it in bytes */
+ mdwidth /= 8;
+
+ size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(epnum >> 1));
+ size = DWC3_GTXFIFOSIZ_TXFDEF(size);
+
+		/* FIFO Depth is in MDWIDTH bytes. Multiply */
+ size *= mdwidth;
+
+ num = size / 1024;
+ if (num == 0)
+ num = 1;
+
+ /*
+		 * FIFO sizes account for an extra MDWIDTH * (num + 1) bytes of
+		 * internal overhead. We don't really know how these are used,
+		 * but the documentation says they exist.
+ */
+ size -= mdwidth * (num + 1);
+ size /= num;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
+
+ dep->endpoint.max_streams = 15;
+ dep->endpoint.ops = &dwc3_gadget_ep_ops;
+ list_add_tail(&dep->endpoint.ep_list,
+ &dwc->gadget.ep_list);
+
+ ret = dwc3_alloc_trb_pool(dep);
+ if (ret)
+ return ret;
} else {
int ret;
@@ -1857,7 +2118,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->endpoint.caps.type_int = true;
}
- dep->endpoint.caps.dir_in = !!direction;
+ dep->endpoint.caps.dir_in = direction;
dep->endpoint.caps.dir_out = !direction;
INIT_LIST_HEAD(&dep->pending_list);
@@ -1867,29 +2128,6 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
return 0;
}
-static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
-{
- int ret;
-
- INIT_LIST_HEAD(&dwc->gadget.ep_list);
-
- ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
- if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate OUT endpoints");
- return ret;
- }
-
- ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
- if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate IN endpoints");
- return ret;
- }
-
- return 0;
-}
-
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -1948,19 +2186,26 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
- if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ /*
+	 * If we're dealing with an unaligned-size OUT transfer, we will be left
+	 * with one TRB pending in the ring. We need to manually clear the HWO
+	 * bit from that TRB.
+ */
+ if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
return 1;
+ }
count = trb->size & DWC3_TRB_SIZE_MASK;
- req->request.actual += count;
+ req->remaining += count;
+
+ if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ return 1;
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: incomplete IN transfer",
- dep->name);
/*
* If missed isoc occurred and there is
* no request queued then issue END
@@ -2006,11 +2251,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, *n;
struct dwc3_trb *trb;
bool ioc = false;
- int ret;
+ int ret = 0;
list_for_each_entry_safe(req, n, &dep->started_list, list) {
unsigned length;
- unsigned actual;
int chain;
length = req->request.length;
@@ -2024,6 +2268,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
+ if (trb->ctrl & DWC3_TRB_CTRL_HWO)
+ break;
+
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -2038,17 +2285,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
event, status, chain);
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- actual = length - req->request.actual;
- req->request.actual = actual;
+ if (req->unaligned || req->zero) {
+ trb = &dep->trb_pool[dep->trb_dequeue];
+ ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+ event, status, false);
+ req->unaligned = false;
+ req->zero = false;
+ }
+
+ req->request.actual = length - req->remaining;
- if (ret && chain && (actual < length) && req->num_pending_sgs)
+ if ((req->request.actual < length) && req->num_pending_sgs)
return __dwc3_gadget_kick_transfer(dep, 0);
dwc3_gadget_giveback(dep, req, status);
@@ -2156,11 +2403,18 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
{
struct dwc3_ep *dep;
u8 epnum = event->endpoint_number;
+ u8 cmd;
dep = dwc->eps[epnum];
- if (!(dep->flags & DWC3_EP_ENABLED))
- return;
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return;
+
+ /* Handle only EPCMDCMPLT when EP disabled */
+ if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+ return;
+ }
if (epnum == 0 || epnum == 1) {
dwc3_ep0_interrupt(dwc, event);
@@ -2172,9 +2426,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->resource_index = 0;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dwc3_trace(trace_dwc3_gadget,
- "%s is an Isochronous endpoint",
- dep->name);
+ dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
return;
}
@@ -2187,22 +2439,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_gadget_start_isoc(dwc, dep, event);
} else {
- int active;
int ret;
- active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
-
- dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
- dep->name, active ? "Transfer Active"
- : "Transfer Not Active");
-
ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
-
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
}
break;
@@ -2212,26 +2453,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->name);
return;
}
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- switch (event->status) {
- case DEPEVT_STREAMEVT_FOUND:
- dwc3_trace(trace_dwc3_gadget,
- "Stream %d found and started",
- event->parameters);
-
- break;
- case DEPEVT_STREAMEVT_NOTFOUND:
- /* FALLTHROUGH */
- default:
- dwc3_trace(trace_dwc3_gadget,
- "unable to find suitable stream");
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ wake_up(&dep->wait_end_transfer);
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
- break;
- case DWC3_DEPEVT_EPCMDCMPLT:
- dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
break;
}
}
@@ -2284,7 +2515,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep = dwc->eps[epnum];
- if (!dep->resource_index)
+ if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+ !dep->resource_index)
return;
/*
@@ -2328,25 +2560,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
+ if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
udelay(100);
-}
-
-static void dwc3_stop_active_transfers(struct dwc3 *dwc)
-{
- u32 epnum;
-
- for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
- struct dwc3_ep *dep;
-
- dep = dwc->eps[epnum];
- if (!dep)
- continue;
-
- if (!(dep->flags & DWC3_EP_ENABLED))
- continue;
-
- dwc3_remove_requests(dwc, dep);
}
}
@@ -2435,8 +2651,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->test_mode = false;
-
- dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
/* Reset device address to zero */
@@ -2445,30 +2659,16 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
-static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
-{
- u32 reg;
- u32 usb30_clock = DWC3_GCTL_CLK_BUS;
-
- /*
- * We change the clock only at SS but I dunno why I would want to do
- * this. Maybe it becomes part of the power saving plan.
- */
-
- if ((speed != DWC3_DSTS_SUPERSPEED) &&
- (speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return;
+ATOMIC_NOTIFIER_HEAD(conndone_nh);
- /*
- * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
- * each time on Connect Done.
- */
- if (!usb30_clock)
- return;
+int dwc3_conndone_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&conndone_nh, nb);
+}
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+int dwc3_conndone_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&conndone_nh, nb);
}
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
@@ -2482,7 +2682,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
- dwc3_update_ram_clk_sel(dwc, speed);
+ /*
+ * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+ * each time on Connect Done.
+ *
+ * Currently we always use the reset value. If any platform
+ * wants to set this to a different value, we need to add a
+ * setting and update GCTL.RAMCLKSEL here.
+ */
switch (speed) {
case DWC3_DSTS_SUPERSPEED_PLUS:
@@ -2563,16 +2770,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -2629,8 +2834,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
- dwc3_trace(trace_dwc3_gadget,
- "ignoring transition U3 -> Resume");
return;
}
}
@@ -2764,11 +2967,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_EOPF:
/* It changed to be suspend event for version 2.30a and above */
- if (dwc->revision < DWC3_REVISION_230A) {
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
- } else {
- dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
-
+ if (dwc->revision >= DWC3_REVISION_230A) {
/*
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
@@ -2779,16 +2978,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
}
break;
case DWC3_DEVICE_EVENT_SOF:
- dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
- break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- dwc3_trace(trace_dwc3_gadget, "Erratic Error");
- break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- dwc3_trace(trace_dwc3_gadget, "Command Complete");
- break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- dwc3_trace(trace_dwc3_gadget, "Overflow");
break;
default:
dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
@@ -2798,7 +2990,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
static void dwc3_process_event_entry(struct dwc3 *dwc,
const union dwc3_event *event)
{
- trace_dwc3_event(event->raw);
+ trace_dwc3_event(event->raw, dwc);
/* Endpoint IRQ, handle it and return early */
if (event->type.is_devspec == 0) {
@@ -2831,7 +3023,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
while (left > 0) {
union dwc3_event event;
- event.raw = *(u32 *) (evt->buf + evt->lpos);
+ event.raw = *(u32 *) (evt->cache + evt->lpos);
dwc3_process_event_entry(dwc, &event);
@@ -2844,10 +3036,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
* boundary so I worry about that once we try to handle
* that.
*/
- evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
}
evt->count = 0;
@@ -2859,6 +3049,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ }
+
return ret;
}
@@ -2879,6 +3074,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
struct dwc3 *dwc = evt->dwc;
+ u32 amount;
u32 count;
u32 reg;
@@ -2911,6 +3107,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ amount = min(count, evt->length - evt->lpos);
+ memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
+
+ if (amount < count)
+ memcpy(evt->cache, evt->buf, count - amount);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
return IRQ_WAKE_THREAD;
}
@@ -2921,6 +3125,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
return dwc3_check_event_buf(evt);
}
+static int dwc3_gadget_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing peripheral IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
/**
* dwc3_gadget_init - Initializes gadget related registers
* @dwc: pointer to our controller context structure
@@ -2929,70 +3166,40 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- int ret, irq;
- struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int ret;
+ int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing peripheral IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- }
- }
+ irq = dwc3_gadget_get_irq(dwc);
+ if (irq < 0) {
+ ret = irq;
+ goto err0;
}
dwc->irq_gadget = irq;
- dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
- &dwc->ctrl_req_addr, GFP_KERNEL);
- if (!dwc->ctrl_req) {
- dev_err(dwc->dev, "failed to allocate ctrl request\n");
- ret = -ENOMEM;
- goto err0;
- }
-
- dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
- &dwc->ep0_trb_addr, GFP_KERNEL);
+ dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+ sizeof(*dwc->ep0_trb) * 2,
+ &dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
ret = -ENOMEM;
- goto err1;
+ goto err0;
}
- dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
+ dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
if (!dwc->setup_buf) {
ret = -ENOMEM;
- goto err2;
+ goto err1;
}
- dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
- DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
- GFP_KERNEL);
- if (!dwc->ep0_bounce) {
- dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
+ dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
+ &dwc->bounce_addr, GFP_KERNEL);
+ if (!dwc->bounce) {
ret = -ENOMEM;
- goto err3;
+ goto err2;
}
- dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
- if (!dwc->zlp_buf) {
- ret = -ENOMEM;
- goto err4;
- }
+ init_completion(&dwc->ep0_in_setup);
dwc->gadget.ops = &dwc3_gadget_ops;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
@@ -3017,53 +3224,41 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* composite.c that we are USB 2.0 + LPM ECN.
*/
if (dwc->revision < DWC3_REVISION_220A)
- dwc3_trace(trace_dwc3_gadget,
- "Changing max_speed on rev %08x",
+ dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
/*
- * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
- * on ep out.
- */
- dwc->gadget.quirk_ep_out_aligned_size = true;
-
- /*
* REVISIT: Here we should clear all pending IRQs to be
* sure we're starting from a well known location.
*/
- ret = dwc3_gadget_init_endpoints(dwc);
+ ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
if (ret)
- goto err5;
+ goto err3;
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
- goto err5;
+ goto err4;
}
return 0;
-err5:
- kfree(dwc->zlp_buf);
-
err4:
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
- dwc->ep0_bounce, dwc->ep0_bounce_addr);
err3:
- kfree(dwc->setup_buf);
+ dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ dwc->bounce_addr);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
- dwc->ep0_trb, dwc->ep0_trb_addr);
+ kfree(dwc->setup_buf);
err1:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
- dwc->ctrl_req, dwc->ctrl_req_addr);
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
err0:
return ret;
@@ -3074,20 +3269,12 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
usb_del_gadget_udc(&dwc->gadget);
-
dwc3_gadget_free_endpoints(dwc);
-
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
- dwc->ep0_bounce, dwc->ep0_bounce_addr);
-
+ dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ dwc->bounce_addr);
kfree(dwc->setup_buf);
- kfree(dwc->zlp_buf);
-
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
- dwc->ep0_trb, dwc->ep0_trb_addr);
-
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
- dwc->ctrl_req, dwc->ctrl_req_addr);
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
}
int dwc3_gadget_suspend(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 39459b718e98..e4602d0e515b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -29,16 +29,16 @@ struct dwc3;
/* DEPCFG parameter 1 */
#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
-#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
-#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
-#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
-#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
-#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
+#define DWC3_DEPCFG_XFER_COMPLETE_EN BIT(8)
+#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN BIT(9)
+#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
+#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
+#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
-#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
+#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
-#define DWC3_DEPCFG_BULK_BASED (1 << 30)
-#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
+#define DWC3_DEPCFG_BULK_BASED BIT(30)
+#define DWC3_DEPCFG_FIFO_BASED BIT(31)
/* DEPCFG parameter 0 */
#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
@@ -47,10 +47,10 @@ struct dwc3;
#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
/* This applies for core versions earlier than 1.94a */
-#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
+#define DWC3_DEPCFG_IGN_SEQ_NUM BIT(31)
/* These apply for core versions 1.94a and later */
#define DWC3_DEPCFG_ACTION_INIT (0 << 30)
-#define DWC3_DEPCFG_ACTION_RESTORE (1 << 30)
+#define DWC3_DEPCFG_ACTION_RESTORE BIT(30)
#define DWC3_DEPCFG_ACTION_MODIFY (2 << 30)
/* DEPXFERCFG parameter 0 */
@@ -62,10 +62,7 @@ struct dwc3;
static inline struct dwc3_request *next_request(struct list_head *list)
{
- if (list_empty(list))
- return NULL;
-
- return list_first_entry(list, struct dwc3_request, list);
+ return list_first_entry_or_null(list, struct dwc3_request, list);
}
static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 626d87d545fc..ccbf0c35a9b1 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
#include "core.h"
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing host IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[3];
@@ -28,39 +61,18 @@ int dwc3_host_init(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
- irq = platform_get_irq_byname(dwc3_pdev, "host");
- if (irq == -EPROBE_DEFER)
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
return irq;
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing host IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- } else {
- res = platform_get_resource(dwc3_pdev,
- IORESOURCE_IRQ, 0);
- }
- } else {
- res = platform_get_resource_byname(dwc3_pdev,
- IORESOURCE_IRQ,
- "dwc_usb3");
- }
-
- } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+ if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "host");
- }
+ "dwc_usb3");
+ if (!res)
+ res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
@@ -73,11 +85,7 @@ int dwc3_host_init(struct dwc3 *dwc)
return -ENOMEM;
}
- dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
xhci->dev.parent = dwc->dev;
- xhci->dev.dma_mask = dwc->dev->dma_mask;
- xhci->dev.dma_parms = dwc->dev->dma_parms;
dwc->xhci = xhci;
@@ -88,6 +96,15 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err1;
}
+#ifdef CONFIG_USB_DWC3_HISI
+ /* if otg, otg will do device_add */
+ if (dwc->dwc_otg) {
+ dev_err(dwc->dev, "%s if otg, otg will do device_add.\n",
+ __func__);
+ return 0;
+ }
+#endif
+
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
@@ -114,9 +131,9 @@ int dwc3_host_init(struct dwc3 *dwc)
}
phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
ret = platform_device_add(xhci);
if (ret) {
@@ -127,9 +144,9 @@ int dwc3_host_init(struct dwc3 *dwc)
return 0;
err2:
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
err1:
platform_device_put(xhci);
return ret;
@@ -137,9 +154,13 @@ err1:
void dwc3_host_exit(struct dwc3 *dwc)
{
+#ifdef CONFIG_USB_DWC3_HISI
+ if (dwc->dwc_otg)
+ return;
+#endif
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
platform_device_unregister(dwc->xhci);
}
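
Note: the new dwc3_host_get_irq() above flattens the old nested lookups into one linear priority order: the "host" IRQ name, then the legacy "dwc_usb3" name, then index 0, with 0 normalised to -EINVAL and -EPROBE_DEFER passed straight through. A minimal sketch of that same order as a table-driven loop, for illustration only (not part of the patch; the helper name and table are hypothetical, and <linux/platform_device.h> is assumed):

static const char *const dwc3_host_irq_names[] = { "host", "dwc_usb3" };

static int example_dwc3_host_irq(struct platform_device *pdev)
{
	int i, irq;

	for (i = 0; i < ARRAY_SIZE(dwc3_host_irq_names); i++) {
		irq = platform_get_irq_byname(pdev, dwc3_host_irq_names[i]);
		/* a usable IRQ or a probe deferral ends the search early */
		if (irq > 0 || irq == -EPROBE_DEFER)
			return irq;
	}

	irq = platform_get_irq(pdev, 0);
	return irq ? irq : -EINVAL;	/* never report 0 as success */
}
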
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8fecc7..adc8648c92b2 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -28,6 +28,13 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
u32 value;
+#ifdef CONFIG_USB_DWC3_HISI
+ extern atomic_t hisi_dwc3_power_on;
+
+ if (unlikely(atomic_read(&hisi_dwc3_power_on) == 0))
+ return 0;
+#endif
+
/*
* We requested the mem region starting from the Globals address
* space, see dwc3_probe in core.c.
@@ -40,14 +47,20 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value);
return value;
}
static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
{
+#ifdef CONFIG_USB_DWC3_HISI
+ extern atomic_t hisi_dwc3_power_on;
+
+ if (unlikely(atomic_read(&hisi_dwc3_power_on) == 0))
+ return;
+#endif
+
/*
* We requested the mem region starting from the Globals address
* space, see dwc3_probe in core.c.
@@ -60,8 +73,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value);
}
#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd191b5..f1bd444d22a3 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -27,57 +27,51 @@
#include "core.h"
#include "debug.h"
-DECLARE_EVENT_CLASS(dwc3_log_msg,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, DWC3_MSG_MAX)),
+DECLARE_EVENT_CLASS(dwc3_log_io,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value),
+ TP_STRUCT__entry(
+ __field(void *, base)
+ __field(u32, offset)
+ __field(u32, value)
+ ),
TP_fast_assign(
- vsnprintf(__get_str(msg), DWC3_MSG_MAX, vaf->fmt, *vaf->va);
+ __entry->base = base;
+ __entry->offset = offset;
+ __entry->value = value;
),
- TP_printk("%s", __get_str(msg))
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_readl,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+ TP_printk("addr %p value %08x", __entry->base + __entry->offset,
+ __entry->value)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_writel,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DEFINE_EVENT(dwc3_log_io, dwc3_readl,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_core,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DEFINE_EVENT(dwc3_log_io, dwc3_writel,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
);
DECLARE_EVENT_CLASS(dwc3_log_event,
- TP_PROTO(u32 event),
- TP_ARGS(event),
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc),
TP_STRUCT__entry(
__field(u32, event)
+ __field(u32, ep0state)
),
TP_fast_assign(
__entry->event = event;
+ __entry->ep0state = dwc->ep0state;
),
TP_printk("event (%08x): %s", __entry->event,
- dwc3_decode_event(__entry->event))
+ dwc3_decode_event(__entry->event, __entry->ep0state))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
- TP_PROTO(u32 event),
- TP_ARGS(event)
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc)
);
DECLARE_EVENT_CLASS(dwc3_log_ctrl,
@@ -179,7 +173,7 @@ DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
__entry->param = param;
__entry->status = status;
),
- TP_printk("cmd '%s' [%d] param %08x --> status: %s",
+ TP_printk("cmd '%s' [%x] param %08x --> status: %s",
dwc3_gadget_generic_cmd_string(__entry->cmd),
__entry->cmd, __entry->param,
dwc3_gadget_generic_cmd_status_string(__entry->status)
@@ -237,6 +231,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__field(u32, bph)
__field(u32, size)
__field(u32, ctrl)
+ __field(u32, type)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -247,47 +242,38 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
+ __entry->type = usb_endpoint_type(dep->endpoint.desc);
),
- TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->queued, __entry->allocated,
__entry->trb, __entry->bph, __entry->bpl,
- __entry->size, __entry->ctrl,
+ ({char *s;
+ int pcm = ((__entry->size >> 24) & 3) + 1;
+ switch (__entry->type) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_ISOC:
+ switch (pcm) {
+ case 1:
+ s = "1x ";
+ break;
+ case 2:
+ s = "2x ";
+ break;
+ case 3:
+ s = "3x ";
+ break;
+ default:
+ s = "";
+ break;
+ }
+ break;
+ default:
+ s = "";
+ } s; }),
+ DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
__entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
__entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
__entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
- ({char *s;
- switch (__entry->ctrl & 0x3f0) {
- case DWC3_TRBCTL_NORMAL:
- s = "normal";
- break;
- case DWC3_TRBCTL_CONTROL_SETUP:
- s = "setup";
- break;
- case DWC3_TRBCTL_CONTROL_STATUS2:
- s = "status2";
- break;
- case DWC3_TRBCTL_CONTROL_STATUS3:
- s = "status3";
- break;
- case DWC3_TRBCTL_CONTROL_DATA:
- s = "data";
- break;
- case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
- s = "isoc-first";
- break;
- case DWC3_TRBCTL_ISOCHRONOUS:
- s = "isoc";
- break;
- case DWC3_TRBCTL_LINK_TRB:
- s = "link";
- break;
- default:
- s = "UNKNOWN";
- break;
- } s; })
+ dwc3_trb_type_string(DWC3_TRBCTL_TYPE(__entry->ctrl))
)
);
@@ -301,6 +287,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb,
TP_ARGS(dep, trb)
);
+DECLARE_EVENT_CLASS(dwc3_log_ep,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, DWC3_MSG_MAX)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, maxburst)
+ __field(unsigned, flags)
+ __field(unsigned, direction)
+ __field(u8, trb_enqueue)
+ __field(u8, trb_dequeue)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+ __entry->maxpacket = dep->endpoint.maxpacket;
+ __entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
+ __entry->max_streams = dep->endpoint.max_streams;
+ __entry->maxburst = dep->endpoint.maxburst;
+ __entry->flags = dep->flags;
+ __entry->direction = dep->direction;
+ __entry->trb_enqueue = dep->trb_enqueue;
+ __entry->trb_dequeue = dep->trb_dequeue;
+ ),
+ TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->max_streams,
+ __entry->maxburst, __entry->trb_enqueue,
+ __entry->trb_dequeue,
+ __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+ __entry->flags & DWC3_EP_STALL ? 'S' : 's',
+ __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+ __entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
+ __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+ __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
+ __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
+ __entry->direction ? '<' : '>'
+ )
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
#endif /* __DWC3_TRACE_H */
/* this part has to be here */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 82eea55a7b5c..697946b28145 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -586,7 +586,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -614,7 +614,7 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -1703,7 +1703,7 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -1775,7 +1775,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!xhci->scratchpad)
return;
@@ -1851,7 +1851,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int size;
int i, j, num_ports;
@@ -2396,7 +2396,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int val, val2;
u64 val_64;
struct xhci_segment *seg;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index dec100811946..984d8c605ddb 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
@@ -139,6 +140,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct hc_driver *driver;
+ struct device *sysdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
@@ -155,22 +157,39 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * sysdev must point to a device that is known to the system firmware
+ * or PCI hardware. We handle these three cases here:
+ * 1. xhci_plat comes from firmware
+ * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+ * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+ */
+ sysdev = &pdev->dev;
+ if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+ sysdev = sysdev->parent;
+#ifdef CONFIG_PCI
+ else if (sysdev->parent && sysdev->parent->parent &&
+ sysdev->parent->parent->bus == &pci_bus_type)
+ sysdev = sysdev->parent->parent;
+#endif
+
/* Try to set 64-bit DMA first */
- if (WARN_ON(!pdev->dev.dma_mask))
+ if (WARN_ON(!sysdev->dma_mask))
/* Platform did not initialize dma_mask */
- ret = dma_coerce_mask_and_coherent(&pdev->dev,
+ ret = dma_coerce_mask_and_coherent(sysdev,
DMA_BIT_MASK(64));
else
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
/* If setting 64-bit DMA mask fails, fall back to 32-bit DMA mask */
if (ret) {
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32));
if (ret)
return ret;
}
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+ dev_name(&pdev->dev), NULL);
if (!hcd)
return -ENOMEM;
@@ -213,20 +232,20 @@ static int xhci_plat_probe(struct platform_device *pdev)
xhci->clk = clk;
xhci->main_hcd = hcd;
- xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
+ xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto disable_clk;
}
- if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
+ if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
- hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
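
The sysdev comment in xhci_plat_probe() above is the key idea of this hunk: DMA and firmware property lookups must go through the device that the firmware or PCI core actually knows about, while the HCD itself stays bound to the xhci platform child. The three cases map to topologies like the following sketch (device names are illustrative examples, not taken from this patch):

/*
 * 1. xhci described by firmware directly:
 *      /soc/usb@xxxxxxxx             -> sysdev = &pdev->dev
 * 2. xhci created as a child of dwc3-plat:
 *      dwc3@xxxxxxxx / xhci-hcd      -> sysdev = pdev->dev.parent
 * 3. xhci created as a grandchild of dwc3-pci:
 *      0000:00:14.0 / dwc3 / xhci    -> sysdev = pdev->dev.parent->parent
 */

Everything allocated against hcd->self.sysdev (see the xhci-mem.c changes above) then uses the DMA configuration of that ancestor.
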
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a7d239f5fc5f..0452040c0cc8 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -234,6 +234,9 @@ static int xhci_free_msi(struct xhci_hcd *xhci)
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
int ret;
+ /*
+ * TODO: Check with MSI SoC for sysdev
+ */
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
ret = pci_enable_msi(pdev);
@@ -260,7 +263,7 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
*/
static void xhci_free_irq(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.sysdev);
int ret;
/* return if using legacy interrupt */
@@ -746,7 +749,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
- usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
@@ -763,7 +766,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
/* Yet another workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
- pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+ pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}
#ifdef CONFIG_PM
@@ -4827,7 +4830,11 @@ int xhci_get_frame(struct usb_hcd *hcd)
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
- struct device *dev = hcd->self.controller;
+ /*
+ * TODO: Check with DWC3 clients for sysdev according to
+ * quirks
+ */
+ struct device *dev = hcd->self.sysdev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
diff --git a/drivers/usb/pd/Kconfig b/drivers/usb/pd/Kconfig
new file mode 100644
index 000000000000..5954dbba6361
--- /dev/null
+++ b/drivers/usb/pd/Kconfig
@@ -0,0 +1 @@
+source "drivers/usb/pd/richtek/Kconfig"
diff --git a/drivers/usb/pd/Makefile b/drivers/usb/pd/Makefile
new file mode 100644
index 000000000000..ead73b45f846
--- /dev/null
+++ b/drivers/usb/pd/Makefile
@@ -0,0 +1,2 @@
+obj-y += hisi_pd.o
+obj-y += richtek/
diff --git a/drivers/usb/pd/hisi_pd.c b/drivers/usb/pd/hisi_pd.c
new file mode 100644
index 000000000000..5fe0f46bd1e9
--- /dev/null
+++ b/drivers/usb/pd/hisi_pd.c
@@ -0,0 +1,602 @@
+/************************************************************
+ *
+ * Copyright (C), Hisilicon Tech. Co., Ltd.
+ * FileName: hisi_pd.c
+ * Author: Hisilicon Version : 0.1 Date: 2016-5-9
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Description: .c file for the power delivery core layer, which handles
+ * public logic management for different chips and
+ * provides interfaces for external modules.
+ * Version:
+ * Function List:
+ * History:
+ * <author> <time> <version > <desc>
+ ***********************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/hisi/log/hisi_log.h>
+#include <linux/hisi/usb/hisi_pd_dev.h>
+#include <linux/hisi/usb/hisi_usb.h>
+#include <linux/hisi/usb/pd/richtek/tcpm.h>
+
+struct pd_dpm_info *g_pd_di;
+static bool g_pd_cc_orientation;
+static struct class *typec_class;
+static struct device *typec_dev;
+static int pd_dpm_typec_state;
+
+#ifndef HISILOG_TAG
+#define HISILOG_TAG hisi_pd
+HISILOG_REGIST();
+#endif
+
+static bool pd_dpm_get_cc_orientation(void)
+{
+ hisilog_info("%s cc_orientation =%d\n", __func__, g_pd_cc_orientation);
+ return g_pd_cc_orientation;
+}
+
+static void pd_dpm_set_cc_orientation(bool cc_orientation)
+{
+ hisilog_info("%s cc_orientation =%d\n", __func__, cc_orientation);
+ g_pd_cc_orientation = cc_orientation;
+}
+
+void pd_dpm_get_typec_state(int *typec_state)
+{
+ hisilog_info("%s = %d\n",
+ __func__, pd_dpm_typec_state);
+
+ *typec_state = pd_dpm_typec_state;
+}
+
+static void pd_dpm_set_typec_state(int typec_state)
+{
+ hisilog_info("%s = %d\n",
+ __func__, typec_state);
+
+ pd_dpm_typec_state = typec_state;
+}
+
+static ssize_t pd_dpm_cc_orientation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ pd_dpm_get_cc_orientation() ? "2" : "1");
+}
+
+static ssize_t pd_dpm_pd_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ pd_dpm_get_pd_finish_flag() ? "0" : "1");
+}
+
+static DEVICE_ATTR(cc_orientation, 0444, pd_dpm_cc_orientation_show, NULL);
+static DEVICE_ATTR(pd_state, 0444, pd_dpm_pd_state_show, NULL);
+
+static struct attribute *pd_dpm_ctrl_attributes[] = {
+ &dev_attr_cc_orientation.attr,
+ &dev_attr_pd_state.attr,
+ NULL,
+};
+
+static const struct attribute_group pd_dpm_attr_group = {
+ .attrs = pd_dpm_ctrl_attributes,
+};
+
+int pd_dpm_wake_unlock_notifier_call(struct pd_dpm_info *di,
+ unsigned long event, void *data)
+{
+ return atomic_notifier_call_chain(&di->pd_wake_unlock_evt_nh,
+ event, data);
+}
+
+int pd_dpm_vbus_notifier_call(struct pd_dpm_info *di, unsigned long event,
+ void *data)
+{
+ hisilog_err("%s: pd_dpm_vbus_notifier_call!!!,++++\n",
+ __func__);
+ return atomic_notifier_call_chain(&di->pd_evt_nh, event, data);
+}
+
+bool pd_dpm_get_pd_finish_flag(void)
+{
+ if (g_pd_di)
+ return g_pd_di->pd_finish_flag;
+ else
+ return false;
+}
+
+bool pd_dpm_get_pd_source_vbus(void)
+{
+ if (g_pd_di)
+ return g_pd_di->pd_source_vbus;
+ else
+ return false;
+}
+
+void pd_dpm_report_pd_source_vbus(struct pd_dpm_info *di, void *data)
+{
+ struct pd_dpm_vbus_state *vbus_state = data;
+
+ mutex_lock(&di->sink_vbus_lock);
+
+ if (vbus_state->vbus_type & TCP_VBUS_CTRL_PD_DETECT)
+ di->pd_finish_flag = true;
+
+ if (vbus_state->mv == 0) {
+ hisilog_info("%s : Disable\n", __func__);
+ pd_dpm_vbus_notifier_call(g_pd_di, CHARGER_TYPE_NONE, data);
+ } else {
+ di->pd_source_vbus = true;
+ hisilog_info("%s : Source %d mV, %d mA\n",
+ __func__, vbus_state->mv, vbus_state->ma);
+ pd_dpm_vbus_notifier_call(g_pd_di, PLEASE_PROVIDE_POWER, data);
+ }
+ mutex_unlock(&di->sink_vbus_lock);
+}
+
+void pd_dpm_report_pd_sink_vbus(struct pd_dpm_info *di, void *data)
+{
+ bool skip = false;
+ unsigned long event;
+ struct pd_dpm_vbus_state *vbus_state = data;
+
+ mutex_lock(&di->sink_vbus_lock);
+
+ if (vbus_state->vbus_type & TCP_VBUS_CTRL_PD_DETECT)
+ di->pd_finish_flag = true;
+
+ if (di->pd_finish_flag)
+ event = PD_DPM_VBUS_TYPE_PD;
+ else if (di->bc12_finish_flag)
+ skip = true;
+ else
+ event = PD_DPM_VBUS_TYPE_TYPEC;
+
+ if (!skip) {
+ vbus_state = data;
+
+ if (vbus_state->mv == 0) {
+ if (event == PD_DPM_VBUS_TYPE_PD) {
+ hisilog_info("%s : Disable\n", __func__);
+ pd_dpm_vbus_notifier_call(g_pd_di,
+ CHARGER_TYPE_NONE,
+ data);
+ }
+ } else {
+ di->pd_source_vbus = false;
+ hisilog_info("%s : Sink %d mV, %d mA\n",
+ __func__, vbus_state->mv, vbus_state->ma);
+ pd_dpm_vbus_notifier_call(g_pd_di, event, data);
+ }
+ } else {
+ hisilog_info("%s : skip\n", __func__);
+ }
+
+ mutex_unlock(&di->sink_vbus_lock);
+}
+
+int pd_dpm_report_bc12(struct notifier_block *usb_nb, unsigned long event,
+ void *data)
+{
+ struct pd_dpm_info *di = container_of(usb_nb,
+ struct pd_dpm_info, usb_nb);
+
+ if (event == CHARGER_TYPE_NONE && !di->pd_finish_flag) {
+ di->bc12_finish_flag = false;
+ hisilog_info("%s : PD_WAKE_UNLOCK\n",
+ __func__);
+ pd_dpm_wake_unlock_notifier_call(g_pd_di, PD_WAKE_UNLOCK, NULL);
+ }
+
+ if (event == PLEASE_PROVIDE_POWER)
+ return NOTIFY_OK;
+
+ if (!di->pd_finish_flag) {
+ hisilog_info("%s : event (%lu)\n", __func__, event);
+ pd_dpm_vbus_notifier_call(di, event, data);
+ } else {
+ hisilog_info("%s : igrone\n", __func__);
+ }
+
+ return NOTIFY_OK;
+}
+
+int register_pd_wake_unlock_notifier(struct notifier_block *nb)
+{
+ int ret = 0;
+
+ if (!nb)
+ return -EINVAL;
+
+ if (!g_pd_di)
+ return ret;
+
+ ret = atomic_notifier_chain_register(&g_pd_di->pd_wake_unlock_evt_nh,
+ nb);
+ if (ret != 0)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL(register_pd_wake_unlock_notifier);
+
+int unregister_pd_wake_unlock_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister
+ (&g_pd_di->pd_wake_unlock_evt_nh, nb);
+}
+EXPORT_SYMBOL(unregister_pd_wake_unlock_notifier);
+
+int register_pd_dpm_notifier(struct notifier_block *nb)
+{
+ int ret = 0;
+
+ if (!nb)
+ return -EINVAL;
+
+ if (!g_pd_di)
+ return ret;
+
+ ret = atomic_notifier_chain_register(&g_pd_di->pd_evt_nh, nb);
+ if (ret != 0)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL(register_pd_dpm_notifier);
+
+int unregister_pd_dpm_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&g_pd_di->pd_evt_nh, nb);
+}
+EXPORT_SYMBOL(unregister_pd_dpm_notifier);
+
+static inline void pd_dpm_report_device_attach(void)
+{
+ hisilog_info("%s \r\n", __func__);
+ if (pd_dpm_get_pd_finish_flag()) {
+ hisilog_info("%s, in pd process, report charger connect event\n",
+ __func__);
+ hisi_usb_otg_event(CHARGER_CONNECT_EVENT);
+ }
+}
+
+static inline void pd_dpm_report_host_attach(void)
+{
+ hisilog_info("%s \r\n", __func__);
+}
+
+static inline void pd_dpm_report_device_detach(void)
+{
+ hisilog_info("%s \r\n", __func__);
+ if (pd_dpm_get_pd_finish_flag()) {
+ hisilog_info("%s, in pd process, report charger connect event\n",
+ __func__);
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ }
+ pd_dpm_vbus_notifier_call(g_pd_di, CHARGER_TYPE_NONE, NULL);
+}
+
+static inline void pd_dpm_report_host_detach(void)
+{
+ hisilog_info("%s \r\n", __func__);
+}
+
+static void pd_dpm_report_attach(int new_state)
+{
+ switch (new_state) {
+ case PD_DPM_USB_TYPEC_DEVICE_ATTACHED:
+ pd_dpm_report_device_attach();
+ break;
+
+ case PD_DPM_USB_TYPEC_HOST_ATTACHED:
+ pd_dpm_report_host_attach();
+ break;
+ }
+}
+
+static void pd_dpm_report_detach(int last_state)
+{
+ switch (last_state) {
+ case PD_DPM_USB_TYPEC_DEVICE_ATTACHED:
+ pd_dpm_report_device_detach();
+ break;
+
+ case PD_DPM_USB_TYPEC_HOST_ATTACHED:
+ pd_dpm_report_host_detach();
+ break;
+ }
+}
+
+static void pd_dpm_usb_update_state(
+ struct work_struct *work)
+{
+ int new_ev, last_ev;
+ struct pd_dpm_info *usb_cb_data =
+ container_of(to_delayed_work(work),
+ struct pd_dpm_info,
+ usb_state_update_work);
+
+ mutex_lock(&usb_cb_data->usb_lock);
+ new_ev = usb_cb_data->pending_usb_event;
+ mutex_unlock(&usb_cb_data->usb_lock);
+
+ last_ev = usb_cb_data->last_usb_event;
+
+ if (last_ev == new_ev)
+ return;
+
+ switch (new_ev) {
+ case PD_DPM_USB_TYPEC_DETACHED:
+ pd_dpm_report_detach(last_ev);
+ break;
+
+ case PD_DPM_USB_TYPEC_DEVICE_ATTACHED:
+ case PD_DPM_USB_TYPEC_HOST_ATTACHED:
+ if (last_ev != PD_DPM_USB_TYPEC_DETACHED)
+ pd_dpm_report_detach(last_ev);
+ pd_dpm_report_attach(new_ev);
+ break;
+ default:
+ return;
+ }
+
+ usb_cb_data->last_usb_event = new_ev;
+}
+
+int pd_dpm_handle_pe_event(unsigned long event, void *data)
+{
+ bool attach_event = false;
+ int usb_event = PD_DPM_USB_TYPEC_NONE;
+ struct pd_dpm_typec_state *typec_state = NULL;
+
+ hisilog_err("%s:!!!,event=%ld,+++\n",
+ __func__, event);
+
+ switch (event) {
+ case PD_DPM_PE_EVT_TYPEC_STATE:
+ {
+ typec_state = data;
+ switch (typec_state->new_state) {
+ case PD_DPM_TYPEC_ATTACHED_SNK:
+ attach_event = true;
+ usb_event = PD_DPM_USB_TYPEC_DEVICE_ATTACHED;
+ break;
+
+ case PD_DPM_TYPEC_ATTACHED_SRC:
+ attach_event = true;
+ usb_event = PD_DPM_USB_TYPEC_HOST_ATTACHED;
+ break;
+
+ case PD_DPM_TYPEC_UNATTACHED:
+ mutex_lock(&g_pd_di->sink_vbus_lock);
+ g_pd_di->pd_finish_flag = false;
+ g_pd_di->bc12_finish_flag = false;
+ g_pd_di->pd_source_vbus = false;
+ mutex_unlock(&g_pd_di->sink_vbus_lock);
+ usb_event = PD_DPM_USB_TYPEC_DETACHED;
+ break;
+
+ default:
+ hisilog_info("%s can not detect typec state\r\n",
+ __func__);
+ break;
+ }
+ pd_dpm_set_typec_state(usb_event);
+ }
+ break;
+
+ case PD_DPM_PE_EVT_PD_STATE:
+ {
+ struct pd_dpm_pd_state *pd_state = data;
+
+ switch (pd_state->connected) {
+ case PD_CONNECT_PE_READY_SNK:
+ case PD_CONNECT_PE_READY_SRC:
+ break;
+ }
+ }
+ break;
+
+ case PD_DPM_PE_EVT_DIS_VBUS_CTRL:
+ {
+ if (!g_pd_di) {
+ hisilog_err("%s: g_pd_di is null!!!,+++\n",
+ __func__);
+ return -1;
+ }
+
+ if (g_pd_di->pd_finish_flag) {
+ struct pd_dpm_vbus_state vbus_state;
+
+ hisilog_info("%s : Disable VBUS Control\n",
+ __func__);
+ vbus_state.mv = 0;
+ vbus_state.ma = 0;
+
+ pd_dpm_vbus_notifier_call(g_pd_di,
+ CHARGER_TYPE_NONE,
+ &vbus_state);
+ }
+ }
+ break;
+
+ case PD_DPM_PE_EVT_SINK_VBUS:
+ {
+ pd_dpm_report_pd_sink_vbus(g_pd_di, data);
+ }
+ break;
+
+ case PD_DPM_PE_EVT_SOURCE_VBUS:
+ {
+ pd_dpm_report_pd_source_vbus(g_pd_di, data);
+ }
+ break;
+
+ case PD_DPM_PE_EVT_DR_SWAP:
+ {
+ struct pd_dpm_swap_state *swap_state = data;
+
+ if (swap_state->new_role == PD_ROLE_DFP)
+ usb_event = PD_DPM_USB_TYPEC_HOST_ATTACHED;
+ else
+ usb_event = PD_DPM_USB_TYPEC_DEVICE_ATTACHED;
+ }
+ break;
+
+ case PD_DPM_PE_EVT_PR_SWAP:
+ break;
+
+ default:
+ hisilog_info("%s unkonw event \r\n", __func__);
+ break;
+ };
+
+ if (attach_event)
+ pd_dpm_set_cc_orientation(typec_state->polarity);
+
+ if (usb_event != PD_DPM_USB_TYPEC_NONE) {
+ mutex_lock(&g_pd_di->usb_lock);
+ if (g_pd_di->pending_usb_event != usb_event) {
+ cancel_delayed_work(&g_pd_di->usb_state_update_work);
+ g_pd_di->pending_usb_event = usb_event;
+ queue_delayed_work(g_pd_di->usb_wq,
+ &g_pd_di->usb_state_update_work,
+ msecs_to_jiffies(0));
+ } else {
+ pr_info("Pending event is same --> ignore this event %d\n",
+ usb_event);
+ }
+ mutex_unlock(&g_pd_di->usb_lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pd_dpm_handle_pe_event);
+
+static int pd_dpm_parse_dt(struct pd_dpm_info *info,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -EINVAL;
+ /* default name */
+ if (of_property_read_string(np, "tcp_name",
+ &info->tcpc_name) < 0)
+ info->tcpc_name = "type_c_port0";
+
+ return 0;
+}
+
+static int pd_dpm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct pd_dpm_info *di;
+
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+ di->dev = &pdev->dev;
+ hisilog_info("%s: probe\n", __func__);
+ g_pd_di = di;
+
+ mutex_init(&di->sink_vbus_lock);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&di->pd_evt_nh);
+ ATOMIC_INIT_NOTIFIER_HEAD(&di->pd_wake_unlock_evt_nh);
+
+ di->usb_nb.notifier_call = pd_dpm_report_bc12;
+ ret = hisi_charger_type_notifier_register(&di->usb_nb);
+ if (ret < 0)
+ hisilog_err("hisi_charger_type_notifier_register failed\n");
+
+ if (typec_class) {
+ typec_dev = device_create(typec_class, NULL, 0, NULL, "typec");
+ if (IS_ERR(typec_dev)) {
+ hisilog_err("%s: typec device create error\n", __func__);
+ } else {
+ ret = sysfs_create_group(&typec_dev->kobj,
+ &pd_dpm_attr_group);
+ if (ret)
+ hisilog_err("%s: typec sysfs group create error\n",
+ __func__);
+ }
+ }
+
+ hisilog_info("%s ++++\r\n\r\n", __func__);
+
+ di->last_usb_event = PD_DPM_USB_TYPEC_NONE;
+ di->pending_usb_event = PD_DPM_USB_TYPEC_NONE;
+
+ mutex_init(&di->usb_lock);
+
+ di->usb_wq = create_workqueue("pd_dpm_usb_wq");
+ INIT_DELAYED_WORK(&di->usb_state_update_work,
+ pd_dpm_usb_update_state);
+ platform_set_drvdata(pdev, di);
+
+ pd_dpm_parse_dt(di, &pdev->dev);
+ notify_tcp_dev_ready(di->tcpc_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pd_dpm_probe);
+
+static const struct of_device_id pd_dpm_callback_match_table[] = {
+ {.compatible = "hisilicon,pd_dpm",},
+ {},
+};
+
+static struct platform_driver pd_dpm_callback_driver = {
+ .probe = pd_dpm_probe,
+ .remove = NULL,
+ .driver = {
+ .name = "hisilicon,pd_dpm",
+ .owner = THIS_MODULE,
+ .of_match_table = pd_dpm_callback_match_table,
+ }
+};
+
+static int __init pd_dpm_init(void)
+{
+ hisilog_info("%s\n", __func__);
+ /*adjust the original product*/
+ typec_class = class_create(THIS_MODULE, "hisi_typec");
+ if (IS_ERR(typec_class)) {
+ hisilog_err("%s: cannot create class\n", __func__);
+ return PTR_ERR(typec_class);
+ }
+
+ return platform_driver_register(&pd_dpm_callback_driver);
+}
+
+static void __exit pd_dpm_exit(void)
+{
+ platform_driver_unregister(&pd_dpm_callback_driver);
+}
+
+device_initcall(pd_dpm_init);
+module_exit(pd_dpm_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("hisilicon pd dpm");
+MODULE_AUTHOR("wangbinghui<wangbinghui@hisilicon.com>");
diff --git a/drivers/usb/pd/richtek/Kconfig b/drivers/usb/pd/richtek/Kconfig
new file mode 100644
index 000000000000..a5078580f87f
--- /dev/null
+++ b/drivers/usb/pd/richtek/Kconfig
@@ -0,0 +1,48 @@
+config TCPC_CLASS
+ bool "TypeC Port Controller Device Class"
+ help
+ Say Y to enable the Type-C Port Controller (TCPC)
+ device class.
+
+config USB_POWER_DELIVERY
+ bool "Support USB power delivery Function"
+ default n
+ help
+ Say Y to enable
+ USB
+ Power Delivery
+ support
+
+config TCPC_RT1711H
+ bool "Richtek RT1711H TypeC port Controller Driver"
+ depends on TCPC_CLASS
+ default n
+ help
+ Say Y to enable the Richtek RT1711H Type-C port
+ controller driver.
+
+config USB_PD_VBUS_STABLE_TOUT
+ int "PD VBUS Stable Timeout"
+ depends on USB_POWER_DELIVERY
+ range 0 1000 # >= 0, <= 1000
+ default 125
+ help
+ Timeout, in milliseconds, to wait for VBUS to become
+ stable after a requested change.
+
+config USB_PD_VBUS_PRESENT_TOUT
+ int "PD VBUS Present Timeout"
+ depends on USB_POWER_DELIVERY
+ range 0 1000 # >= 0, <= 1000
+ default 20
+ help
+ Timeout, in milliseconds, to wait for VBUS to be
+ reported as present.
diff --git a/drivers/usb/pd/richtek/Makefile b/drivers/usb/pd/richtek/Makefile
new file mode 100644
index 000000000000..c8990c4d911b
--- /dev/null
+++ b/drivers/usb/pd/richtek/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_DUAL_ROLE_USB_INTF) += tcpci_dual_role.o
+obj-$(CONFIG_TCPC_RT1711H) += tcpc_rt1711h.o
+obj-$(CONFIG_TCPC_CLASS) += tcpci_core.o tcpci_typec.o tcpci_alert.o tcpci_timer.o tcpm.o rt-regmap.o
+obj-$(CONFIG_USB_POWER_DELIVERY) += tcpci_event.o \
+ pd_core.o pd_policy_engine.o pd_process_evt.o \
+ pd_dpm_core.o \
+ pd_process_evt_snk.o pd_process_evt_src.o pd_process_evt_vdm.o \
+ pd_process_evt_drs.o pd_process_evt_prs.o pd_process_evt_vcs.o \
+ pd_process_evt_dbg.o \
+ pd_policy_engine_src.o pd_policy_engine_snk.o pd_policy_engine_ufp.o pd_policy_engine_vcs.o \
+ pd_policy_engine_dfp.o pd_policy_engine_dr.o pd_policy_engine_drs.o pd_policy_engine_prs.o \
+ pd_policy_engine_dbg.o
diff --git a/drivers/usb/pd/richtek/pd_core.c b/drivers/usb/pd/richtek/pd_core.c
new file mode 100644
index 000000000000..24c0f9b826bf
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_core.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Core Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+/* From DTS */
+
+static int pd_parse_pdata(pd_port_t *pd_port)
+{
+ struct device_node *np;
+ int ret = 0, i;
+
+ pr_info("%s\n", __func__);
+ np = of_find_node_by_name(pd_port->tcpc_dev->dev.of_node, "pd-data");
+
+ if (np) {
+ ret = of_property_read_u32(
+ np, "pd,source-pdo-size",
+ (u32 *)&pd_port->local_src_cap_default.nr);
+ if (ret < 0)
+ pr_err("%s get source pdo size fail\n", __func__);
+
+ ret = of_property_read_u32_array(
+ np, "pd,source-pdo-data",
+ (u32 *)pd_port->local_src_cap_default.pdos,
+ pd_port->local_src_cap_default.nr);
+ if (ret < 0)
+ pr_err("%s get source pdo data fail\n", __func__);
+
+ pr_info("%s src pdo data =\n", __func__);
+ for (i = 0; i < pd_port->local_src_cap_default.nr; i++) {
+ pr_info("%s %d: 0x%08x\n", __func__, i,
+ pd_port->local_src_cap_default.pdos[i]);
+ }
+
+ ret = of_property_read_u32(np, "pd,sink-pdo-size",
+ (u32 *)&pd_port->local_snk_cap.nr);
+ if (ret < 0)
+ pr_err("%s get sink pdo size fail\n", __func__);
+
+ ret = of_property_read_u32_array(
+ np, "pd,sink-pdo-data",
+ (u32 *)pd_port->local_snk_cap.pdos,
+ pd_port->local_snk_cap.nr);
+ if (ret < 0)
+ pr_err("%s get sink pdo data fail\n", __func__);
+
+ pr_info("%s snk pdo data =\n", __func__);
+ for (i = 0; i < pd_port->local_snk_cap.nr; i++) {
+ pr_info("%s %d: 0x%08x\n", __func__, i,
+ pd_port->local_snk_cap.pdos[i]);
+ }
+
+ ret = of_property_read_u32(np, "pd,id-vdo-size",
+ (u32 *)&pd_port->id_vdo_nr);
+ if (ret < 0)
+ pr_err("%s get id vdo size fail\n", __func__);
+ ret = of_property_read_u32_array(
+ np, "pd,id-vdo-data",
+ (u32 *)pd_port->id_vdos, pd_port->id_vdo_nr);
+ if (ret < 0)
+ pr_err("%s get id vdo data fail\n", __func__);
+
+ pr_info("%s id vdos data =\n", __func__);
+ for (i = 0; i < pd_port->id_vdo_nr; i++)
+ pr_info("%s %d: 0x%08x\n", __func__, i,
+ pd_port->id_vdos[i]);
+ }
+
+ return 0;
+}
+
+#define DEFAULT_DP_ROLE_CAP (MODE_DP_SRC)
+#define DEFAULT_DP_FIRST_CONNECTED (DPSTS_DFP_D_CONNECTED)
+#define DEFAULT_DP_SECOND_CONNECTED (DPSTS_DFP_D_CONNECTED)
+
+static const struct {
+ const char *prop_name;
+ u32 val;
+} supported_dpm_caps[] = {
+ {"local_dr_power", DPM_CAP_LOCAL_DR_POWER},
+ {"local_dr_data", DPM_CAP_LOCAL_DR_DATA},
+ {"local_ext_power", DPM_CAP_LOCAL_EXT_POWER},
+ {"local_usb_comm", DPM_CAP_LOCAL_USB_COMM},
+ {"local_usb_suspend", DPM_CAP_LOCAL_USB_SUSPEND},
+ {"local_high_cap", DPM_CAP_LOCAL_HIGH_CAP},
+ {"local_give_back", DPM_CAP_LOCAL_GIVE_BACK},
+ {"local_no_suspend", DPM_CAP_LOCAL_NO_SUSPEND},
+ {"local_vconn_supply", DPM_CAP_LOCAL_VCONN_SUPPLY},
+
+ {"attemp_discover_cable_dfp", DPM_CAP_ATTEMP_DISCOVER_CABLE_DFP},
+ {"attemp_enter_dp_mode", DPM_CAP_ATTEMP_ENTER_DP_MODE},
+ {"attemp_discover_cable", DPM_CAP_ATTEMP_DISCOVER_CABLE},
+ {"attemp_discover_id", DPM_CAP_ATTEMP_DISCOVER_ID},
+
+ {"pr_reject_as_source", DPM_CAP_PR_SWAP_REJECT_AS_SRC},
+ {"pr_reject_as_sink", DPM_CAP_PR_SWAP_REJECT_AS_SNK},
+ {"pr_check_gp_source", DPM_CAP_PR_SWAP_CHECK_GP_SRC},
+ {"pr_check_gp_sink", DPM_CAP_PR_SWAP_CHECK_GP_SNK},
+
+ {"dr_reject_as_dfp", DPM_CAP_DR_SWAP_REJECT_AS_DFP},
+ {"dr_reject_as_ufp", DPM_CAP_DR_SWAP_REJECT_AS_UFP},
+
+ {"snk_prefer_low_voltage", DPM_CAP_SNK_PREFER_LOW_VOLTAGE},
+ {"snk_ignore_mismatch_current", DPM_CAP_SNK_IGNORE_MISMATCH_CURRENT},
+};
+
+static void pd_core_power_flags_init(pd_port_t *pd_port)
+{
+ u32 src_flag, snk_flag, val;
+ struct device_node *np;
+ int i;
+ pd_port_power_caps *snk_cap = &pd_port->local_snk_cap;
+ pd_port_power_caps *src_cap = &pd_port->local_src_cap_default;
+
+ np = of_find_node_by_name(pd_port->tcpc_dev->dev.of_node, "dpm_caps");
+
+ for (i = 0; i < ARRAY_SIZE(supported_dpm_caps); i++) {
+ if (of_property_read_bool(np,
+ supported_dpm_caps[i].prop_name))
+ pd_port->dpm_caps |=
+ supported_dpm_caps[i].val;
+ pr_info("dpm_caps: %s\n",
+ supported_dpm_caps[i].prop_name);
+ }
+
+ if (of_property_read_u32(np, "pr_check", &val) == 0)
+ pd_port->dpm_caps |= DPM_CAP_PR_CHECK_PROP(val);
+ else
+ pr_err("%s get pr_check data fail\n", __func__);
+
+ if (of_property_read_u32(np, "dr_check", &val) == 0)
+ pd_port->dpm_caps |= DPM_CAP_DR_CHECK_PROP(val);
+ else
+ pr_err("%s get dr_check data fail\n", __func__);
+
+ pr_info("dpm_caps = 0x%08x\n", pd_port->dpm_caps);
+
+ src_flag = 0;
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER)
+ src_flag |= PDO_FIXED_DUAL_ROLE;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_DATA)
+ src_flag |= PDO_FIXED_DATA_SWAP;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_EXT_POWER)
+ src_flag |= PDO_FIXED_EXTERNAL;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_USB_COMM)
+ src_flag |= PDO_FIXED_COMM_CAP;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_USB_SUSPEND)
+ src_flag |= PDO_FIXED_SUSPEND;
+
+ snk_flag = src_flag;
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_HIGH_CAP)
+ snk_flag |= PDO_FIXED_HIGH_CAP;
+
+ snk_cap->pdos[0] |= snk_flag;
+ src_cap->pdos[0] |= src_flag;
+}
+
+int pd_core_init(struct tcpc_device *tcpc_dev)
+{
+ int ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ mutex_init(&pd_port->pd_lock);
+ pd_port->tcpc_dev = tcpc_dev;
+
+ pd_port->pe_pd_state = PE_IDLE2;
+ pd_port->pe_vdm_state = PE_IDLE2;
+
+ pd_port->pd_connect_state = PD_CONNECT_NONE;
+
+ ret = pd_parse_pdata(pd_port);
+ if (ret)
+ return ret;
+
+ pd_port->svid_data_cnt = 0;
+ pd_core_power_flags_init(pd_port);
+
+ PE_INFO("%s\r\n", __func__);
+ return 0;
+}
+
+void pd_extract_rdo_power(u32 rdo, u32 pdo,
+ u32 *op_curr, u32 *max_curr)
+{
+ u32 op_power, max_power, vmin;
+
+ switch (pdo & PDO_TYPE_MASK) {
+ case PDO_TYPE_FIXED:
+ case PDO_TYPE_VARIABLE:
+ *op_curr = RDO_FIXED_VAR_EXTRACT_OP_CURR(rdo);
+ *max_curr = RDO_FIXED_VAR_EXTRACT_MAX_CURR(rdo);
+ break;
+
+ case PDO_TYPE_BATTERY: /* TODO:check it later !! */
+ vmin = PDO_BATT_EXTRACT_MIN_VOLT(pdo);
+ op_power = RDO_BATT_EXTRACT_OP_POWER(rdo);
+ max_power = RDO_BATT_EXTRACT_MAX_POWER(rdo);
+
+ *op_curr = op_power / vmin;
+ *max_curr = max_power / vmin;
+ break;
+
+ default:
+ *op_curr = *max_curr = 0;
+ break;
+ }
+}
+
+u32 pd_reset_pdo_power(u32 pdo, u32 imax)
+{
+ u32 ioper;
+
+ switch (pdo & PDO_TYPE_MASK) {
+ case PDO_TYPE_FIXED:
+ ioper = PDO_FIXED_EXTRACT_CURR(pdo);
+ if (ioper > imax)
+ return PDO_FIXED_RESET_CURR(pdo, imax);
+ break;
+
+ case PDO_TYPE_VARIABLE:
+ ioper = PDO_VAR_EXTRACT_CURR(pdo);
+ if (ioper > imax)
+ return PDO_VAR_RESET_CURR(pdo, imax);
+ break;
+
+ case PDO_TYPE_BATTERY:
+ /* TODO:check it later !! */
+ PD_ERR("No Support\r\n");
+ break;
+ }
+ return pdo;
+}
+
+void pd_extract_pdo_power(u32 pdo,
+ u32 *vmin, u32 *vmax, u32 *ioper)
+{
+ u32 pwatt;
+
+ switch (pdo & PDO_TYPE_MASK) {
+ case PDO_TYPE_FIXED:
+ *ioper = PDO_FIXED_EXTRACT_CURR(pdo);
+ *vmin = *vmax = PDO_FIXED_EXTRACT_VOLT(pdo);
+ break;
+
+ case PDO_TYPE_VARIABLE:
+ *ioper = PDO_VAR_EXTRACT_CURR(pdo);
+ *vmin = PDO_VAR_EXTRACT_MIN_VOLT(pdo);
+ *vmax = PDO_VAR_EXTRACT_MAX_VOLT(pdo);
+ break;
+
+ case PDO_TYPE_BATTERY: /* TODO:check it later !! */
+ *vmin = PDO_BATT_EXTRACT_MIN_VOLT(pdo);
+ *vmax = PDO_BATT_EXTRACT_MAX_VOLT(pdo);
+ pwatt = PDO_BATT_EXTRACT_OP_POWER(pdo);
+ *ioper = pwatt / *vmin;
+ break;
+
+ default:
+ *vmin = *vmax = *ioper = 0;
+ }
+}
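
A quick worked example for the extraction above (illustration only, not part of the patch): in the USB PD fixed-supply PDO layout, the voltage sits at bits 10..19 in 50 mV units and the maximum current at bits 0..9 in 10 mA units, so a plain 5 V / 3 A source PDO is (100 << 10) | 300 = 0x0001912c. Assuming the EXTRACT macros report millivolts and milliamps, which is how the rest of this series consumes them:

	u32 vmin, vmax, ioper;
	u32 pdo = (100 << 10) | 300;	/* 5 V / 3 A fixed PDO, flags omitted */

	pd_extract_pdo_power(pdo, &vmin, &vmax, &ioper);
	/* expect: vmin == vmax == 5000 (mV), ioper == 3000 (mA) */
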
+
+u32 pd_extract_cable_curr(u32 vdo)
+{
+ u32 cable_curr;
+
+ switch (PD_VDO_CABLE_CURR(vdo)) {
+ case CABLE_CURR_1A5:
+ cable_curr = 1500;
+ break;
+ case CABLE_CURR_5A:
+ cable_curr = 5000;
+ break;
+ default:
+ case CABLE_CURR_3A:
+ cable_curr = 3000;
+ break;
+ }
+
+ return cable_curr;
+}
+
+void pd_reset_svid_data(pd_port_t *pd_port)
+{
+ u8 i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ svid_data->exist = false;
+ svid_data->remote_mode.mode_cnt = 0;
+ svid_data->active_mode = 0;
+ }
+}
+
+int pd_reset_protocol_layer(pd_port_t *pd_port)
+{
+ int i = 0;
+
+ pd_notify_pe_reset_protocol(pd_port);
+
+ pd_port->explicit_contract = 0;
+ pd_port->local_selected_cap = 0;
+ pd_port->remote_selected_cap = 0;
+ pd_port->during_swap = 0;
+ pd_port->dpm_ack_immediately = 0;
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ pd_port->vconn_return = false;
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+
+ for (i = 0; i < PD_SOP_NR; i++) {
+ pd_port->msg_id_tx[i] = 0;
+ pd_port->msg_id_rx[i] = 0;
+ pd_port->msg_id_rx_init[i] = false;
+ }
+
+ return 0;
+}
+
+int pd_set_rx_enable(pd_port_t *pd_port, u8 enable)
+{
+ return tcpci_set_rx_enable(pd_port->tcpc_dev, enable);
+}
+
+int pd_enable_vbus_valid_detection(pd_port_t *pd_port, bool wait_valid)
+{
+ PE_DBG("WaitVBUS=%d\r\n", wait_valid);
+ pd_notify_pe_wait_vbus_once(pd_port,
+ wait_valid ? PD_WAIT_VBUS_VALID_ONCE :
+ PD_WAIT_VBUS_INVALID_ONCE);
+ return 0;
+}
+
+int pd_enable_vbus_safe0v_detection(pd_port_t *pd_port)
+{
+ PE_DBG("WaitVSafe0V\r\n");
+ pd_notify_pe_wait_vbus_once(pd_port, PD_WAIT_VBUS_SAFE0V_ONCE);
+ return 0;
+}
+
+int pd_enable_vbus_stable_detection(pd_port_t *pd_port)
+{
+ PE_DBG("WaitVStable\r\n");
+ pd_notify_pe_wait_vbus_once(pd_port, PD_WAIT_VBUS_STABLE_ONCE);
+ return 0;
+}
+
+int pd_set_data_role(pd_port_t *pd_port, u8 dr)
+{
+ pd_port->data_role = dr;
+
+ tcpci_notify_role_swap(pd_port->tcpc_dev, TCP_NOTIFY_DR_SWAP, dr);
+ return tcpci_set_msg_header(pd_port->tcpc_dev,
+ pd_port->power_role, pd_port->data_role);
+}
+
+int pd_set_power_role(pd_port_t *pd_port, u8 pr)
+{
+ int ret;
+
+ pd_port->power_role = pr;
+ ret = tcpci_set_msg_header(pd_port->tcpc_dev,
+ pd_port->power_role, pd_port->data_role);
+ if (ret)
+ return ret;
+
+ pd_notify_pe_pr_changed(pd_port);
+
+ tcpci_notify_role_swap(pd_port->tcpc_dev, TCP_NOTIFY_PR_SWAP, pr);
+ return ret;
+}
+
+int pd_init_role(pd_port_t *pd_port, u8 pr, u8 dr, bool vr)
+{
+ pd_port->power_role = pr;
+ pd_port->data_role = dr;
+ pd_port->vconn_source = vr;
+
+ return tcpci_set_msg_header(pd_port->tcpc_dev,
+ pd_port->power_role, pd_port->data_role);
+}
+
+int pd_set_vconn(pd_port_t *pd_port, int enable)
+{
+ pd_port->vconn_source = enable;
+
+ tcpci_notify_role_swap(pd_port->tcpc_dev,
+ TCP_NOTIFY_VCONN_SWAP, enable);
+ return tcpci_set_vconn(pd_port->tcpc_dev, enable);
+}
+
+static inline int pd_reset_modal_operation(pd_port_t *pd_port)
+{
+ u8 i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+
+ if (svid_data->active_mode) {
+ svid_data->active_mode = 0;
+ tcpci_exit_mode(pd_port->tcpc_dev, svid_data->svid);
+ }
+ }
+
+ pd_port->modal_operation = false;
+ return 0;
+}
+
+int pd_reset_local_hw(pd_port_t *pd_port)
+{
+ pd_notify_pe_transit_to_default(pd_port);
+ pd_unlock_msg_output(pd_port);
+
+ pd_reset_pe_timer(pd_port);
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_HARDRESET);
+
+ pd_port->explicit_contract = false;
+ pd_port->pd_connected = false;
+ pd_port->pe_ready = false;
+ pd_port->dpm_ack_immediately = false;
+
+ pd_reset_modal_operation(pd_port);
+
+ pd_set_vconn(pd_port, false);
+
+ if (pd_port->power_role == PD_ROLE_SINK) {
+ pd_port->state_machine = PE_STATE_MACHINE_SINK;
+ pd_set_data_role(pd_port, PD_ROLE_UFP);
+ } else {
+ pd_port->state_machine = PE_STATE_MACHINE_SOURCE;
+ pd_set_data_role(pd_port, PD_ROLE_DFP);
+ }
+
+ pd_dpm_notify_pe_hardreset(pd_port);
+ PE_DBG("reset_local_hw\r\n");
+
+ return 0;
+}
+
+int pd_enable_bist_test_mode(pd_port_t *pd_port, bool en)
+{
+ PE_DBG("bist_test_mode=%d\r\n", en);
+ return tcpci_set_bist_test_mode(pd_port->tcpc_dev, en);
+}
+
+/* ---- Handle PD Message ----*/
+
+int pd_handle_soft_reset(pd_port_t *pd_port, u8 state_machine)
+{
+ pd_port->state_machine = state_machine;
+
+ pd_reset_protocol_layer(pd_port);
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_RECV_SRESET);
+ return pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+/* ---- Send PD Message ----*/
+
+static int pd_send_message(pd_port_t *pd_port, u8 sop_type,
+ u8 msg, u16 count, const u32 *data)
+{
+ int ret;
+ u16 msg_hdr;
+ u8 type = PD_TX_STATE_WAIT_CRC_PD;
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ if (tcpc_dev->typec_attach_old == 0) {
+ PE_DBG("[SendMsg] Unattached\r\n");
+ return 0;
+ }
+
+ if (tcpc_dev->pd_hard_reset_event_pending) {
+ PE_DBG("[SendMsg] HardReset Pending");
+ return 0;
+ }
+
+ if (sop_type == TCPC_TX_SOP) {
+ msg_hdr = PD_HEADER_SOP(msg, pd_port->power_role,
+ pd_port->data_role,
+ pd_port->msg_id_tx[sop_type], count);
+ } else {
+ msg_hdr = PD_HEADER_SOP_PRIME(
+ msg, 0,
+ pd_port->msg_id_tx[sop_type], count);
+ }
+
+ if ((count > 0) && (msg == PD_DATA_VENDOR_DEF))
+ type = PD_TX_STATE_WAIT_CRC_VDM;
+
+ pd_port->msg_id_tx[sop_type] = (pd_port->msg_id_tx[sop_type] + 1) % 8;
+
+ pd_notify_pe_transmit_msg(pd_port, type);
+ ret = tcpci_transmit(pd_port->tcpc_dev, sop_type, msg_hdr, data);
+ if (ret < 0)
+ PD_ERR("[SendMsg] Failed, %d\r\n", ret);
+
+ return ret;
+}
+
+int pd_send_ctrl_msg(pd_port_t *pd_port, u8 sop_type, u8 msg)
+{
+ return pd_send_message(pd_port, sop_type, msg, 0, NULL);
+}
+
+int pd_send_data_msg(pd_port_t *pd_port,
+ u8 sop_type, u8 msg,
+ u8 cnt, u32 *payload)
+{
+ return pd_send_message(pd_port, sop_type, msg, cnt, payload);
+}
+
+int pd_send_soft_reset(pd_port_t *pd_port, u8 state_machine)
+{
+ pd_port->state_machine = state_machine;
+
+ pd_reset_protocol_layer(pd_port);
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_SEND_SRESET);
+ return pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_SOFT_RESET);
+}
+
+int pd_send_hard_reset(pd_port_t *pd_port)
+{
+ int ret;
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ PE_DBG("Send HARD Reset\r\n");
+
+ pd_port->hard_reset_counter++;
+ pd_notify_pe_send_hard_reset(pd_port);
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_SEND_HRESET);
+ ret = tcpci_transmit(tcpc_dev, TCPC_TX_HARD_RESET, 0, NULL);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_USB_PD_IGNORE_HRESET_COMPLETE_TIMER
+ if (!(tcpc_dev->tcpc_flags & TCPC_FLAGS_WAIT_HRESET_COMPLETE)) {
+ pd_put_sent_hard_reset_event(tcpc_dev);
+ return 0;
+ }
+#endif
+ return 0;
+}
+
+int pd_send_bist_mode2(pd_port_t *pd_port)
+{
+ int ret = 0;
+
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_SEND_BIST);
+
+#ifdef CONFIG_USB_PD_TRANSMIT_BIST2
+ TCPC_DBG("BIST_MODE_2\r\n");
+ ret = tcpci_transmit(
+ pd_port->tcpc_dev, TCPC_TX_BIST_MODE_2, 0, NULL);
+#else
+ ret = tcpci_set_bist_carrier_mode(
+ pd_port->tcpc_dev, 1 << 2);
+#endif
+
+ return ret;
+}
+
+int pd_disable_bist_mode2(pd_port_t *pd_port)
+{
+#ifndef CONFIG_USB_PD_TRANSMIT_BIST2
+ return tcpci_set_bist_carrier_mode(
+ pd_port->tcpc_dev, 0);
+#else
+ return 0;
+#endif
+}
+
+/* ---- Send / Reply VDM Command ----*/
+
+int pd_send_svdm_request(pd_port_t *pd_port,
+ u8 sop_type, u16 svid, u8 vdm_cmd,
+ u8 obj_pos, u8 cnt, u32 *data_obj)
+{
+ int ret;
+ u32 payload[VDO_MAX_SIZE];
+ char buf[1024] = { 0 };
+
+ if (cnt >= (VDO_MAX_SIZE - 1))
+ snprintf(buf, sizeof(buf), "%d over the vdo max size\n", cnt);
+
+ payload[0] = VDO_S(svid, CMDT_INIT, vdm_cmd, obj_pos);
+ memcpy(&payload[1], data_obj, sizeof(u32) * cnt);
+
+ ret = pd_send_data_msg(
+ pd_port, sop_type,
+ PD_DATA_VENDOR_DEF, 1 + cnt, payload);
+
+ if (ret == 0 && (vdm_cmd != CMD_ATTENTION))
+ pd_enable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+
+ return ret;
+}
+
+int pd_reply_svdm_request(pd_port_t *pd_port, pd_event_t *pd_event,
+ u8 reply, u8 cnt, u32 *data_obj)
+{
+ u32 vdo;
+ u32 payload[VDO_MAX_SIZE];
+ char buf[1024] = { 0 };
+
+ if (cnt >= (VDO_MAX_SIZE - 1))
+ snprintf(buf, sizeof(buf), "%d over the vdo max size\n", cnt);
+
+ if (!pd_event->pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+
+ vdo = pd_event->pd_msg->payload[0];
+ payload[0] = VDO_S(
+ PD_VDO_VID(vdo), reply, PD_VDO_CMD(vdo), PD_VDO_OPOS(vdo));
+
+ if (cnt > 0) {
+ if (!data_obj)
+ snprintf(buf, sizeof(buf), "the data_obj is NULL\n");
+
+ memcpy(&payload[1], data_obj, sizeof(u32) * cnt);
+ }
+
+ return pd_send_data_msg(pd_port,
+ TCPC_TX_SOP, PD_DATA_VENDOR_DEF, 1 + cnt, payload);
+}
+
+void pd_lock_msg_output(pd_port_t *pd_port)
+{
+ if (pd_port->msg_output_lock)
+ return;
+ pd_port->msg_output_lock = true;
+}
+
+void pd_unlock_msg_output(pd_port_t *pd_port)
+{
+ if (!pd_port->msg_output_lock)
+ return;
+ pd_port->msg_output_lock = false;
+}
+
+int pd_update_connect_state(pd_port_t *pd_port, u8 state)
+{
+ if (pd_port->pd_connect_state == state)
+ return 0;
+
+ switch (state) {
+ case PD_CONNECT_TYPEC_ONLY:
+ if (pd_port->power_role == PD_ROLE_SOURCE) {
+ state = PD_CONNECT_TYPEC_ONLY_SRC;
+ } else {
+ switch (pd_port->tcpc_dev->typec_remote_rp_level) {
+ case TYPEC_CC_VOLT_SNK_DFT:
+ state = PD_CONNECT_TYPEC_ONLY_SNK_DFT;
+ break;
+
+ case TYPEC_CC_VOLT_SNK_1_5:
+ case TYPEC_CC_VOLT_SNK_3_0:
+ state = PD_CONNECT_TYPEC_ONLY_SNK;
+ break;
+ }
+ }
+ break;
+
+ case PD_CONNECT_PE_READY:
+ state = pd_port->power_role == PD_ROLE_SOURCE ?
+ PD_CONNECT_PE_READY_SRC : PD_CONNECT_PE_READY_SNK;
+ break;
+
+ case PD_CONNECT_NONE:
+ break;
+ }
+
+ pd_port->pd_connect_state = state;
+ return tcpci_notify_pd_state(pd_port->tcpc_dev, state);
+}
+
+void pd_update_dpm_request_state(pd_port_t *pd_port, u8 state)
+{
+ /* TODO */
+}
diff --git a/drivers/usb/pd/richtek/pd_dpm_core.c b/drivers/usb/pd/richtek/pd_dpm_core.c
new file mode 100644
index 000000000000..949ad11ea48d
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_dpm_core.c
@@ -0,0 +1,1450 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * PD Device Policy Manager Core Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+#include "pd_dpm_prv.h"
+
+/* DPM Init */
+
+static void pd_dpm_update_pdos_flags(pd_port_t *pd_port, u32 pdo)
+{
+ pd_port->dpm_flags &= ~DPM_FLAGS_RESET_PARTNER_MASK;
+
+ /* Only update PDO flags if pdo's type is fixed */
+ if ((pdo & PDO_TYPE_MASK) != PDO_TYPE_FIXED)
+ return;
+
+ if (pdo & PDO_FIXED_DUAL_ROLE)
+ pd_port->dpm_flags |= DPM_FLAGS_PARTNER_DR_POWER;
+
+ if (pdo & PDO_FIXED_DATA_SWAP)
+ pd_port->dpm_flags |= DPM_FLAGS_PARTNER_DR_DATA;
+
+ if (pdo & PDO_FIXED_EXTERNAL)
+ pd_port->dpm_flags |= DPM_FLAGS_PARTNER_EXTPOWER;
+
+ if (pdo & PDO_FIXED_COMM_CAP)
+ pd_port->dpm_flags |= DPM_FLAGS_PARTNER_USB_COMM;
+}
+
+int pd_dpm_enable_vconn(pd_port_t *pd_port, bool en)
+{
+ return pd_set_vconn(pd_port, en);
+}
+
+int pd_dpm_send_sink_caps(pd_port_t *pd_port)
+{
+ pd_port_power_caps *snk_cap = &pd_port->local_snk_cap;
+
+ return pd_send_data_msg(pd_port, TCPC_TX_SOP, PD_DATA_SINK_CAP,
+ snk_cap->nr, snk_cap->pdos);
+}
+
+int pd_dpm_send_source_caps(pd_port_t *pd_port)
+{
+ u8 i;
+ u32 cable_curr = 3000;
+
+ pd_port_power_caps *src_cap0 = &pd_port->local_src_cap_default;
+ pd_port_power_caps *src_cap1 = &pd_port->local_src_cap;
+
+ if (pd_port->power_cable_present) {
+ cable_curr =
+ pd_extract_cable_curr(
+ pd_port->cable_vdos[VDO_INDEX_CABLE]);
+ DPM_DBG("cable_limit: %dmA\r\n", cable_curr);
+ }
+
+ src_cap1->nr = src_cap0->nr;
+ for (i = 0; i < src_cap0->nr; i++) {
+ src_cap1->pdos[i] =
+ pd_reset_pdo_power(src_cap0->pdos[i], cable_curr);
+ }
+
+ return pd_send_data_msg(pd_port, TCPC_TX_SOP, PD_DATA_SOURCE_CAP,
+ src_cap1->nr, src_cap1->pdos);
+}
+
+enum {
+	GOOD_PW_NONE = 0,	/* neither side has good power */
+	GOOD_PW_PARTNER,	/* only the partner has good power */
+	GOOD_PW_LOCAL,		/* only the local side has good power */
+	GOOD_PW_BOTH,		/* both sides have good power */
+};
+
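+/*
+ * Classify the pair for the PR_SWAP policy below: compare the local
+ * DPM_CAP_LOCAL_EXT_POWER capability against the partner's reported
+ * externally-powered flag and return GOOD_PW_NONE/PARTNER/LOCAL/BOTH.
+ */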
+static inline int dpm_check_good_power(pd_port_t *pd_port)
+{
+ bool local_ex, partner_ex;
+
+ local_ex =
+ (pd_port->dpm_caps & DPM_CAP_LOCAL_EXT_POWER) != 0;
+
+ partner_ex =
+ (pd_port->dpm_flags & DPM_FLAGS_PARTNER_EXTPOWER) != 0;
+
+ if (local_ex != partner_ex) {
+ if (partner_ex)
+ return GOOD_PW_PARTNER;
+ return GOOD_PW_LOCAL;
+ }
+
+ if (local_ex)
+ return GOOD_PW_BOTH;
+
+ return GOOD_PW_NONE;
+}
+
+static inline bool dpm_response_request(pd_port_t *pd_port, bool accept)
+{
+ if (accept)
+ return pd_put_dpm_ack_event(pd_port);
+ return pd_put_dpm_nak_event(pd_port, PD_DPM_NAK_REJECT);
+}
+
+/* ---- SNK ---- */
+
+struct dpm_pdo_info_t {
+ u8 type;
+ int vmin;
+ int vmax;
+ int uw;
+ int ma;
+};
+
+struct dpm_rdo_info_t {
+ u8 pos;
+ u8 type;
+ bool mismatch;
+
+ int vmin;
+ int vmax;
+
+ union {
+ u32 max_uw;
+ u32 max_ma;
+ };
+
+ union {
+ u32 oper_uw;
+ u32 oper_ma;
+ };
+};
+
+#define DPM_PDO_TYPE_FIXED 0
+#define DPM_PDO_TYPE_BAT 1
+#define DPM_PDO_TYPE_VAR 2
+#define DPM_PDO_TYPE(pdo) (((pdo) & PDO_TYPE_MASK) >> 30)
+
+static inline bool dpm_is_valid_pdo_pair(
+ struct dpm_pdo_info_t *sink,
+ struct dpm_pdo_info_t *source, u32 caps)
+{
+ if (sink->vmax < source->vmax)
+ return false;
+
+ if (sink->vmin > source->vmin)
+ return false;
+
+ if (caps & DPM_CAP_SNK_IGNORE_MISMATCH_CURRENT)
+ return (sink->ma <= source->ma);
+
+ return true;
+}
+
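+/*
+ * Decode a raw PDO into a dpm_pdo_info_t: voltage range in mV, current
+ * in mA and power in uW (battery PDOs advertise power, so the current
+ * is derived from the minimum voltage).
+ */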
+static inline void dpm_extract_pdo_info(
+ u32 pdo, struct dpm_pdo_info_t *info)
+{
+ memset(info, 0, sizeof(struct dpm_pdo_info_t));
+
+ info->type = DPM_PDO_TYPE(pdo);
+
+ switch (info->type) {
+ case DPM_PDO_TYPE_FIXED:
+ info->ma = PDO_FIXED_EXTRACT_CURR(pdo);
+ info->vmin = PDO_FIXED_EXTRACT_VOLT(pdo);
+ info->vmax = info->vmin;
+ info->uw = info->ma * info->vmax;
+ break;
+
+ case DPM_PDO_TYPE_VAR:
+ info->ma = PDO_VAR_OP_CURR(pdo);
+ info->vmin = PDO_VAR_EXTRACT_MIN_VOLT(pdo);
+ info->vmax = PDO_VAR_EXTRACT_MAX_VOLT(pdo);
+ info->uw = info->ma * info->vmax;
+ break;
+
+ case DPM_PDO_TYPE_BAT:
+ info->uw = PDO_BATT_EXTRACT_OP_POWER(pdo) * 1000;
+ info->vmin = PDO_BATT_EXTRACT_MIN_VOLT(pdo);
+ info->vmax = PDO_BATT_EXTRACT_MAX_VOLT(pdo);
+ info->ma = info->uw / info->vmin;
+ break;
+ }
+}
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+static inline int dpm_calc_src_cap_power_uw(
+ struct dpm_pdo_info_t *source, struct dpm_pdo_info_t *sink)
+{
+ int uw, ma;
+
+ if (source->type == DPM_PDO_TYPE_BAT) {
+ uw = source->uw;
+
+ if (sink->type == DPM_PDO_TYPE_BAT)
+ uw = MIN(uw, sink->uw);
+ } else {
+ ma = source->ma;
+
+ if (sink->type != DPM_PDO_TYPE_BAT)
+ ma = MIN(ma, sink->ma);
+
+ uw = ma * source->vmax;
+ }
+
+ return uw;
+}
+
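+/*
+ * Find the source PDO that best satisfies the given sink PDO: keep the
+ * candidate with the highest deliverable power above min_uw (preferring
+ * the lower voltage on a tie when DPM_CAP_SNK_PREFER_LOW_VOLTAGE is set)
+ * and fill req_info with its position, voltage range and current/power.
+ */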
+static bool dpm_find_match_req_info(
+ struct dpm_rdo_info_t *req_info,
+ u32 snk_pdo, int cnt, u32 *src_pdos,
+ int min_uw, u32 caps)
+{
+ bool overload;
+ int ret = -1;
+ int i;
+ int uw, max_uw = min_uw, cur_mv = 0;
+ struct dpm_pdo_info_t sink, source;
+
+ dpm_extract_pdo_info(snk_pdo, &sink);
+
+ for (i = 0; i < cnt; i++) {
+ dpm_extract_pdo_info(src_pdos[i], &source);
+ if (!dpm_is_valid_pdo_pair(&sink, &source, caps))
+ continue;
+
+ uw = dpm_calc_src_cap_power_uw(&source, &sink);
+
+ overload = uw > max_uw;
+
+ if (caps & DPM_CAP_SNK_PREFER_LOW_VOLTAGE)
+ overload |= (uw == max_uw) && (source.vmax < cur_mv);
+
+ if (overload) {
+ ret = i;
+ max_uw = uw;
+ cur_mv = source.vmax;
+ }
+ }
+
+ if (ret >= 0) {
+ req_info->pos = ret + 1;
+ req_info->type = source.type;
+
+ dpm_extract_pdo_info(src_pdos[ret], &source);
+
+ req_info->vmax = source.vmax;
+ req_info->vmin = source.vmin;
+
+ if (sink.type == DPM_PDO_TYPE_BAT)
+ req_info->mismatch = max_uw < sink.uw;
+ else
+ req_info->mismatch = source.ma < sink.ma;
+
+ if (source.type == DPM_PDO_TYPE_BAT) {
+ req_info->max_uw = sink.uw;
+ req_info->oper_uw = max_uw;
+ } else {
+ req_info->max_ma = sink.ma;
+ req_info->oper_ma = MIN(sink.ma, source.ma);
+ }
+ }
+
+ return (ret >= 0);
+}
+
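+/*
+ * Evaluate every local sink PDO against the remote source capabilities
+ * and remember the request with the highest power; returns false when
+ * no usable match was found.
+ */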
+static bool dpm_build_request_info(
+ pd_port_t *pd_port, struct dpm_rdo_info_t *req_info)
+{
+ bool find_cap = false;
+ int i, max_uw = 0;
+ pd_port_power_caps *snk_cap = &pd_port->local_snk_cap;
+ pd_port_power_caps *src_cap = &pd_port->remote_src_cap;
+
+ memset(req_info, 0, sizeof(struct dpm_rdo_info_t));
+
+ for (i = 0; i < src_cap->nr; i++)
+ DPM_DBG("SrcCap%d: 0x%08x\r\n", i + 1, src_cap->pdos[i]);
+
+ for (i = 0; i < snk_cap->nr; i++) {
+ DPM_DBG("EvaSinkCap%d\r\n", i + 1);
+
+ find_cap = dpm_find_match_req_info(
+ req_info, snk_cap->pdos[i],
+ src_cap->nr, src_cap->pdos,
+ max_uw, pd_port->dpm_caps);
+
+ if (find_cap) {
+ if (req_info->type == DPM_PDO_TYPE_BAT)
+ max_uw = req_info->oper_uw;
+ else
+ max_uw = req_info->vmax * req_info->oper_ma;
+
+ DPM_DBG("Find SrcCap%d(%s):%d mw\r\n",
+ req_info->pos, req_info->mismatch ?
+ "Mismatch" : "Match", max_uw / 1000);
+ pd_port->local_selected_cap = i + 1;
+ }
+ }
+
+ return max_uw != 0;
+}
+
+static bool dpm_build_default_request_info(
+ pd_port_t *pd_port, struct dpm_rdo_info_t *req_info)
+{
+ struct dpm_pdo_info_t sink, source;
+ pd_port_power_caps *snk_cap = &pd_port->local_snk_cap;
+ pd_port_power_caps *src_cap = &pd_port->remote_src_cap;
+
+ pd_port->local_selected_cap = 1;
+
+ dpm_extract_pdo_info(snk_cap->pdos[0], &sink);
+ dpm_extract_pdo_info(src_cap->pdos[0], &source);
+
+ req_info->pos = 1;
+ req_info->type = source.type;
+ req_info->mismatch = true;
+ req_info->vmax = 5000;
+ req_info->vmin = 5000;
+
+ if (req_info->type == DPM_PDO_TYPE_BAT) {
+ req_info->max_uw = sink.uw;
+ req_info->oper_uw = source.uw;
+
+ } else {
+ req_info->max_ma = sink.ma;
+ req_info->oper_ma = source.ma;
+ }
+
+ return true;
+}
+
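+/*
+ * Turn the selected req_info into an RDO (battery or fixed/variable form),
+ * update the requested voltage/current targets and store it in last_rdo.
+ */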
+static inline void dpm_update_request(
+ pd_port_t *pd_port, struct dpm_rdo_info_t *req_info)
+{
+ u32 mw_op, mw_max;
+
+ u32 flags = 0;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_GIVE_BACK)
+ flags |= RDO_GIVE_BACK;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_NO_SUSPEND)
+ flags |= RDO_NO_SUSPEND;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_USB_COMM)
+ flags |= RDO_COMM_CAP;
+
+ if (req_info->mismatch)
+ flags |= RDO_CAP_MISMATCH;
+
+ pd_port->request_v_new = req_info->vmax;
+
+ if (req_info->type == DPM_PDO_TYPE_BAT) {
+ mw_op = req_info->oper_uw / 1000;
+ mw_max = req_info->max_uw / 1000;
+
+ pd_port->request_i_op = req_info->oper_uw / req_info->vmin;
+ pd_port->request_i_max = req_info->max_uw / req_info->vmin;
+
+ if (req_info->mismatch)
+ pd_port->request_i_new = pd_port->request_i_op;
+ else
+ pd_port->request_i_new = pd_port->request_i_max;
+
+ pd_port->last_rdo = RDO_BATT(
+ req_info->pos, mw_op, mw_max, flags);
+ } else {
+ pd_port->request_i_op = req_info->oper_ma;
+ pd_port->request_i_max = req_info->max_ma;
+
+ if (req_info->mismatch)
+ pd_port->request_i_new = pd_port->request_i_op;
+ else
+ pd_port->request_i_new = pd_port->request_i_max;
+
+ pd_port->last_rdo = RDO_FIXED(
+ req_info->pos, req_info->oper_ma,
+ req_info->max_ma, flags);
+ }
+}
+
+bool pd_dpm_send_request(pd_port_t *pd_port, int mv, int ma)
+{
+ bool find_cap = false;
+ struct dpm_rdo_info_t req_info;
+ pd_port_power_caps *src_cap = &pd_port->remote_src_cap;
+ u32 snk_pdo = PDO_FIXED(mv, ma, 0);
+
+ memset(&req_info, 0, sizeof(struct dpm_rdo_info_t));
+
+ find_cap = dpm_find_match_req_info(
+ &req_info, snk_pdo,
+ src_cap->nr, src_cap->pdos,
+ 0, pd_port->dpm_caps);
+
+ if (!find_cap)
+ return false;
+
+ dpm_update_request(pd_port, &req_info);
+ return pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_PW_REQUEST);
+}
+
+void pd_dpm_snk_evaluate_caps(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool find_cap = false;
+ int sink_nr, source_nr;
+ char buf[1024] = { 0 };
+
+ struct dpm_rdo_info_t req_info;
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+ pd_port_power_caps *snk_cap = &pd_port->local_snk_cap;
+ pd_port_power_caps *src_cap = &pd_port->remote_src_cap;
+
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+
+ sink_nr = snk_cap->nr;
+ source_nr = PD_HEADER_CNT(pd_msg->msg_hdr);
+
+ if ((source_nr <= 0) || (sink_nr <= 0)) {
+ DPM_DBG("SrcNR or SnkNR = 0\r\n");
+ return;
+ }
+
+ src_cap->nr = source_nr;
+ memcpy(src_cap->pdos, pd_msg->payload, sizeof(u32) * source_nr);
+ pd_dpm_update_pdos_flags(pd_port, src_cap->pdos[0]);
+
+ find_cap = dpm_build_request_info(pd_port, &req_info);
+
+ /* If we can't find any cap to use, choose default setting */
+ if (!find_cap) {
+ DPM_DBG("Can't find any SrcCap\r\n");
+ dpm_build_default_request_info(pd_port, &req_info);
+ }
+
+ dpm_update_request(pd_port, &req_info);
+
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SOURCE_CAP;
+ if (!(pd_port->dpm_flags & DPM_FLAGS_PARTNER_DR_POWER))
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SINK_CAP;
+
+ if (req_info.pos > 0)
+ pd_put_dpm_notify_event(pd_port, req_info.pos);
+}
+
+void pd_dpm_snk_transition_power(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ tcpci_sink_vbus(
+ pd_port->tcpc_dev, TCP_VBUS_CTRL_REQUEST,
+ pd_port->request_v_new,
+ pd_port->request_i_new);
+
+ pd_port->request_v = pd_port->request_v_new;
+ pd_port->request_i = pd_port->request_i_new;
+}
+
+void pd_dpm_snk_hard_reset(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ tcpci_sink_vbus(
+ pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_HRESET,
+ TCPC_VBUS_SINK_0V, 0);
+ pd_put_pe_event(pd_port, PD_PE_POWER_ROLE_AT_DEFAULT);
+}
+
+/* ---- SRC ---- */
+
+void pd_dpm_src_evaluate_request(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u8 rdo_pos;
+ u32 rdo, pdo;
+ u32 op_curr, max_curr;
+ u32 source_vmin, source_vmax, source_i;
+ bool accept_request = true;
+ char buf[1024] = { 0 };
+
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+ pd_port_power_caps *src_cap = &pd_port->local_src_cap;
+
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+
+ rdo = pd_msg->payload[0];
+ rdo_pos = RDO_POS(rdo);
+
+ DPM_DBG("RequestCap%d\r\n", rdo_pos);
+
+ pd_port->dpm_flags &= (~DPM_FLAGS_PARTNER_MISMATCH);
+ if ((rdo_pos > 0) && (rdo_pos <= src_cap->nr)) {
+ pdo = src_cap->pdos[rdo_pos - 1];
+
+ pd_extract_rdo_power(rdo, pdo, &op_curr, &max_curr);
+ pd_extract_pdo_power(
+ pdo, &source_vmin,
+ &source_vmax, &source_i);
+
+ if (source_i < op_curr) {
+ DPM_DBG("src_i (%d) < op_i (%d)\r\n",
+ source_i, op_curr);
+ accept_request = false;
+ }
+
+ if (rdo & RDO_CAP_MISMATCH) {
+ /* TODO: handle it later */
+ DPM_DBG("CAP_MISMATCH\r\n");
+ pd_port->dpm_flags |= DPM_FLAGS_PARTNER_MISMATCH;
+ } else if (source_i < max_curr) {
+ DPM_DBG("src_i (%d) < max_i (%d)\r\n",
+ source_i, max_curr);
+ accept_request = false;
+ }
+ } else {
+ accept_request = false;
+ DPM_DBG("RequestPos Wrong (%d)\r\n", rdo_pos);
+ }
+
+ if (accept_request) {
+ pd_port->local_selected_cap = rdo_pos;
+
+ pd_port->request_i_op = op_curr;
+ pd_port->request_i_max = max_curr;
+
+ if (rdo & RDO_CAP_MISMATCH)
+ pd_port->request_i_new = op_curr;
+ else
+ pd_port->request_i_new = max_curr;
+
+ pd_port->request_v_new = source_vmin;
+ pd_put_dpm_notify_event(pd_port, rdo_pos);
+ } else {
+ /*
+ * "Contract Invalid" means that the previously
+ * negotiated Voltage and Current values
+		 * are no longer included in the Source's new Capabilities.
+ * If the Sink fails to make a valid Request in this case
+ * then Power Delivery operation is no longer possible
+ * and Power Delivery mode is exited with a Hard Reset.
+ */
+
+ pd_port->local_selected_cap = 0;
+ pd_put_dpm_nak_event(pd_port, PD_DPM_NAK_REJECT_INVALID);
+ }
+}
+
+void pd_dpm_src_transition_power(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_vbus_stable_detection(pd_port);
+
+ tcpci_source_vbus(
+ pd_port->tcpc_dev, TCP_VBUS_CTRL_REQUEST,
+ pd_port->request_v_new, pd_port->request_i_new);
+
+ if (pd_port->request_v == pd_port->request_v_new)
+ pd_put_vbus_stable_event(pd_port->tcpc_dev);
+#if CONFIG_USB_PD_VBUS_STABLE_TOUT
+ else
+ pd_enable_timer(pd_port, PD_TIMER_VBUS_STABLE);
+#endif /* CONFIG_USB_PD_VBUS_STABLE_TOUT */
+
+ pd_port->request_v = pd_port->request_v_new;
+ pd_port->request_i = pd_port->request_i_new;
+}
+
+void pd_dpm_src_inform_cable_vdo(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ const int size = sizeof(u32) * VDO_MAX_SIZE;
+
+ if (pd_event->pd_msg)
+ memcpy(pd_port->cable_vdos, pd_event->pd_msg->payload, size);
+
+ pd_put_dpm_ack_event(pd_port);
+}
+
+void pd_dpm_src_hard_reset(pd_port_t *pd_port)
+{
+ tcpci_source_vbus(
+ pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_HRESET,
+ TCPC_VBUS_SOURCE_0V, 0);
+ pd_enable_vbus_safe0v_detection(pd_port);
+}
+
+/* ---- UFP : update_svid_data ---- */
+
+static inline bool dpm_ufp_update_svid_data_enter_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ svdm_svid_data_t *svid_data;
+
+ DPM_DBG("EnterMode (svid0x%04x, ops:%d)\r\n", svid, ops);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+
+ if (!svid_data)
+ return false;
+
+ /* Only accept 1 mode active at the same time */
+ if (svid_data->active_mode)
+ return false;
+
+ if ((ops == 0) || (ops > svid_data->local_mode.mode_cnt))
+ return false;
+
+ svid_data->active_mode = ops;
+ pd_port->modal_operation = true;
+
+ svdm_ufp_request_enter_mode(pd_port, svid, ops);
+
+ tcpci_enter_mode(
+ pd_port->tcpc_dev,
+ svid, ops,
+ svid_data->local_mode.mode_vdo[ops]);
+ return true;
+}
+
+static inline bool dpm_ufp_update_svid_data_exit_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ u8 i;
+ bool modal_operation;
+ svdm_svid_data_t *svid_data;
+
+ DPM_DBG("ExitMode (svid0x%04x, mode:%d)\r\n", svid, ops);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+
+ if (!svid_data)
+ return false;
+
+ if (svid_data->active_mode == 0)
+ return false;
+
+ if ((ops == 0) || (ops == svid_data->active_mode)) {
+ svid_data->active_mode = 0;
+
+ modal_operation = false;
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+
+ if (svid_data->active_mode) {
+ modal_operation = true;
+ break;
+ }
+ }
+
+ pd_port->modal_operation = modal_operation;
+
+ svdm_ufp_request_exit_mode(pd_port, svid, ops);
+ tcpci_exit_mode(pd_port->tcpc_dev, svid);
+ return true;
+ }
+
+ return false;
+}
+
+/* ---- UFP : Evaluate VDM Request ---- */
+
+static inline bool pd_dpm_ufp_reply_request(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack)
+{
+ return vdm_put_dpm_event(
+ pd_port, ack ? PD_DPM_ACK : PD_DPM_NAK, pd_event->pd_msg);
+}
+
+static inline u32 dpm_vdm_get_svid(pd_event_t *pd_event)
+{
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+ char buf[1024] = { 0 };
+
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+ return PD_VDO_VID(pd_msg->payload[0]);
+}
+
+void pd_dpm_ufp_request_id_info(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_reply_request(
+ pd_port, pd_event,
+ dpm_vdm_get_svid(pd_event) == USB_SID_PD);
+}
+
+void pd_dpm_ufp_request_svid_info(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ack = false;
+
+ if (pd_is_support_modal_operation(pd_port))
+ ack = (dpm_vdm_get_svid(pd_event) == USB_SID_PD);
+
+ pd_dpm_ufp_reply_request(pd_port, pd_event, ack);
+}
+
+void pd_dpm_ufp_request_mode_info(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u16 svid = dpm_vdm_get_svid(pd_event);
+	bool ack = false;
+
+	if (dpm_get_svdm_svid_data(pd_port, svid))
+		ack = true;
+ pd_dpm_ufp_reply_request(pd_port, pd_event, ack);
+}
+
+void pd_dpm_ufp_request_enter_mode(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ack = false;
+ u16 svid;
+ u8 ops;
+ char buf[1024] = { 0 };
+
+ if (!pd_event->pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+ dpm_vdm_get_svid_ops(pd_event, &svid, &ops);
+ ack = dpm_ufp_update_svid_data_enter_mode(pd_port, svid, ops);
+
+ pd_dpm_ufp_reply_request(pd_port, pd_event, ack);
+}
+
+void pd_dpm_ufp_request_exit_mode(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ack;
+ u16 svid;
+ u8 ops;
+
+ dpm_vdm_get_svid_ops(pd_event, &svid, &ops);
+ ack = dpm_ufp_update_svid_data_exit_mode(pd_port, svid, ops);
+ pd_dpm_ufp_reply_request(pd_port, pd_event, ack);
+}
+
+/* ---- UFP : Response VDM Request ---- */
+
+int pd_dpm_ufp_response_id(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ return pd_reply_svdm_request(pd_port, pd_event,
+ CMDT_RSP_ACK, pd_port->id_vdo_nr, pd_port->id_vdos);
+}
+
+int pd_dpm_ufp_response_svids(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ svdm_svid_data_t *svid_data;
+ u16 svid_list[2];
+ u32 svids[VDO_MAX_DATA_SIZE];
+ u8 i = 0, j = 0, cnt = pd_port->svid_data_cnt;
+ char buf[1024] = { 0 };
+
+ if (pd_port->svid_data_cnt >= VDO_MAX_SVID_SIZE) {
+ snprintf(buf, sizeof(buf),
+ "the %d is over vdo max svid size\n",
+ pd_port->svid_data_cnt);
+ }
+
+ if (unlikely(cnt >= VDO_MAX_SVID_SIZE))
+ cnt = VDO_MAX_SVID_SIZE;
+
+ while (i < cnt) {
+ svid_data = &pd_port->svid_data[i++];
+ svid_list[0] = svid_data->svid;
+
+ if (i < cnt) {
+ svid_data = &pd_port->svid_data[i++];
+ svid_list[1] = svid_data->svid;
+ } else {
+ svid_list[1] = 0;
+ }
+ svids[j++] = VDO_SVID(svid_list[0], svid_list[1]);
+ }
+
+ if ((cnt % 2) == 0)
+ svids[j++] = VDO_SVID(0, 0);
+
+ return pd_reply_svdm_request(
+ pd_port, pd_event, CMDT_RSP_ACK, j, svids);
+}
+
+int pd_dpm_ufp_response_modes(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ svdm_svid_data_t *svid_data;
+ u16 svid = dpm_vdm_get_svid(pd_event);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (svid_data) {
+ return pd_reply_svdm_request(
+ pd_port, pd_event, CMDT_RSP_ACK,
+ svid_data->local_mode.mode_cnt,
+ svid_data->local_mode.mode_vdo);
+ } else {
+ PE_DBG("ERROR-4965\r\n");
+ return pd_reply_svdm_request_simply(
+ pd_port, pd_event, CMDT_RSP_NAK);
+ }
+}
+
+/* ---- DFP : update_svid_data ---- */
+
+static inline void dpm_dfp_update_svid_data_exist(
+ pd_port_t *pd_port, u16 svid)
+{
+ u8 k;
+ svdm_svid_data_t *svid_data;
+
+#ifdef CONFIG_USB_PD_KEEP_SVIDS
+ svdm_svid_list_t *list = &pd_port->remote_svid_list;
+
+ if (list->cnt < VDO_MAX_SVID_SIZE)
+ list->svids[list->cnt++] = svid;
+ else
+ DPM_DBG("ERR:SVIDCNT\r\n");
+#endif
+
+ for (k = 0; k < pd_port->svid_data_cnt; k++) {
+ svid_data = &pd_port->svid_data[k];
+
+ if (svid_data->svid == svid)
+ svid_data->exist = 1;
+ }
+}
+
+static inline void dpm_dfp_update_svid_data_modes(
+ pd_port_t *pd_port, u16 svid, u32 *mode_list, u8 count)
+{
+ u8 i;
+ svdm_svid_data_t *svid_data;
+
+ DPM_DBG("InformMode (0x%04x:%d): \r\n", svid, count);
+ for (i = 0; i < count; i++)
+ DPM_DBG("Mode[%d]: 0x%08x\r\n", i, mode_list[i]);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return;
+
+ svid_data->remote_mode.mode_cnt = count;
+
+ if (count != 0) {
+ memcpy(svid_data->remote_mode.mode_vdo,
+ mode_list, sizeof(u32) * count);
+ }
+}
+
+static inline void dpm_dfp_update_svid_enter_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ svdm_svid_data_t *svid_data;
+
+ DPM_DBG("EnterMode (svid0x%04x, mode:%d)\r\n", svid, ops);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return;
+
+ svid_data->active_mode = ops;
+ pd_port->modal_operation = true;
+
+ tcpci_enter_mode(
+ pd_port->tcpc_dev,
+ svid_data->svid, ops,
+ svid_data->remote_mode.mode_vdo[ops]);
+}
+
+static inline void dpm_dfp_update_svid_data_exit_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ u8 i;
+ bool modal_operation;
+ svdm_svid_data_t *svid_data;
+
+ DPM_DBG("ExitMode (svid0x%04x, mode:%d)\r\n", svid, ops);
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return;
+
+ if ((ops == 0) || (ops == svid_data->active_mode)) {
+ svid_data->active_mode = 0;
+
+ modal_operation = false;
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+
+ if (svid_data->active_mode) {
+ modal_operation = true;
+ break;
+ }
+ }
+
+ pd_port->modal_operation = modal_operation;
+ tcpci_exit_mode(pd_port->tcpc_dev, svid);
+ }
+}
+
+/* ---- DFP : Inform VDM Result ---- */
+
+void pd_dpm_dfp_inform_id(pd_port_t *pd_port, pd_event_t *pd_event, bool ack)
+{
+#if DPM_DBG_ENABLE
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+#endif /* DPM_DBG_ENABLE */
+
+ if (ack) {
+ DPM_DBG("InformID, 0x%02x, 0x%02x, 0x%02x, 0x%02x\r\n",
+ pd_msg->payload[0], pd_msg->payload[1],
+ pd_msg->payload[2], pd_msg->payload[3]);
+ }
+
+ svdm_dfp_inform_id(pd_port, pd_event, ack);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
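+/*
+ * Each Discover SVIDs VDO carries two 16-bit SVIDs. Mark the SVIDs we also
+ * support as present and request another Discover SVIDs round only when
+ * the response was full (six VDOs) and not zero-terminated.
+ */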
+static inline int dpm_dfp_consume_svids(
+ pd_port_t *pd_port, u32 *svid_list, u8 count)
+{
+ bool discover_again = true;
+
+ u8 i, j;
+ u16 svid[2];
+
+ DPM_DBG("InformSVID (%d): \r\n", count);
+
+ if (count < 6)
+ discover_again = false;
+
+ for (i = 0; i < count; i++) {
+ svid[0] = PD_VDO_SVID_SVID0(svid_list[i]);
+ svid[1] = PD_VDO_SVID_SVID1(svid_list[i]);
+
+ DPM_DBG("svid[%d]: 0x%04x 0x%04x\r\n", i, svid[0], svid[1]);
+
+ for (j = 0; j < 2; j++) {
+ if (svid[j] == 0) {
+ discover_again = false;
+ break;
+ }
+
+ dpm_dfp_update_svid_data_exist(pd_port, svid[j]);
+ }
+ }
+
+ if (discover_again) {
+ DPM_DBG("DiscoverSVID Again\r\n");
+ vdm_put_dpm_vdm_request_event(
+ pd_port, PD_DPM_VDM_REQUEST_DISCOVER_SVIDS);
+ return 1;
+ }
+
+ return 0;
+}
+
+void pd_dpm_dfp_inform_svids(pd_port_t *pd_port, pd_event_t *pd_event, bool ack)
+{
+ u8 count;
+ u32 *svid_list;
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ if (ack) {
+ svid_list = &pd_msg->payload[1];
+ count = (PD_HEADER_CNT(pd_msg->msg_hdr) - 1);
+
+ if (dpm_dfp_consume_svids(pd_port, svid_list, count))
+ return;
+ }
+
+ svdm_dfp_inform_svids(pd_port, ack);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+void pd_dpm_dfp_inform_modes(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack)
+{
+ u8 count;
+ u16 svid = 0;
+ u16 expected_svid = pd_port->mode_svid;
+
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ if (ack) {
+ count = (PD_HEADER_CNT(pd_msg->msg_hdr));
+ svid = PD_VDO_VID(pd_msg->payload[VDO_INDEX_HDR]);
+
+ if (svid != expected_svid) {
+ ack = false;
+ DPM_DBG("Not expected SVID (0x%04x, 0x%04x)\r\n",
+ svid, expected_svid);
+ } else {
+ dpm_dfp_update_svid_data_modes(
+ pd_port, svid, &pd_msg->payload[1], count - 1);
+ }
+ }
+
+ svdm_dfp_inform_modes(pd_port, expected_svid, ack);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+void pd_dpm_dfp_inform_enter_mode(
+ pd_port_t *pd_port,
+ pd_event_t *pd_event, bool ack)
+{
+ u16 svid = 0;
+ u16 expected_svid = pd_port->mode_svid;
+ u8 ops = 0;
+
+ if (ack) {
+ dpm_vdm_get_svid_ops(pd_event, &svid, &ops);
+
+ /* TODO: check ops later ?! */
+ if (svid != expected_svid) {
+ ack = false;
+ DPM_DBG("Not expected SVID (0x%04x, 0x%04x)\r\n",
+ svid, expected_svid);
+ } else {
+ dpm_dfp_update_svid_enter_mode(pd_port, svid, ops);
+ }
+ }
+
+ svdm_dfp_inform_enter_mode(pd_port, expected_svid, ops, ack);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+void pd_dpm_dfp_inform_exit_mode(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u16 svid = 0;
+ u16 expected_svid = pd_port->mode_svid;
+ u8 ops;
+
+ if (pd_event->event_type != PD_EVT_TIMER_MSG) {
+ dpm_vdm_get_svid_ops(pd_event, &svid, &ops);
+ } else {
+ svid = pd_port->mode_svid;
+ ops = pd_port->mode_obj_pos;
+ }
+
+ dpm_dfp_update_svid_data_exit_mode(pd_port, expected_svid, ops);
+
+ svdm_dfp_inform_exit_mode(pd_port, expected_svid, ops);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+void pd_dpm_dfp_inform_attention(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u16 svid = 0;
+ u8 ops;
+
+ dpm_vdm_get_svid_ops(pd_event, &svid, &ops);
+ DPM_DBG("Attention (svid0x%04x, mode:%d)\r\n", svid, ops);
+
+ svdm_dfp_inform_attention(pd_port, svid, pd_event);
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+void pd_dpm_dfp_inform_cable_vdo(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ const int size = sizeof(u32) * VDO_MAX_SIZE;
+
+ if (pd_event->pd_msg)
+ memcpy(pd_port->cable_vdos, pd_event->pd_msg->payload, size);
+
+ vdm_put_dpm_notified_event(pd_port);
+}
+
+/*
+ * DRP : Inform Source/Sink Cap
+ */
+
+void pd_dpm_dr_inform_sink_cap(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+ pd_port_power_caps *snk_cap = &pd_port->remote_snk_cap;
+ char buf[1024] = { 0 };
+
+ if (pd_event_msg_match(pd_event, PD_EVT_DATA_MSG, PD_DATA_SINK_CAP)) {
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+ snk_cap->nr = PD_HEADER_CNT(pd_msg->msg_hdr);
+ memcpy(snk_cap->pdos, pd_msg->payload,
+ sizeof(u32) * snk_cap->nr);
+
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SINK_CAP;
+ } else {
+ if (pd_event_msg_match(
+ pd_event,
+ PD_EVT_CTRL_MSG,
+ PD_CTRL_REJECT))
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SINK_CAP;
+
+ snk_cap->nr = 0;
+ snk_cap->pdos[0] = 0;
+ }
+
+ pd_dpm_update_pdos_flags(pd_port, snk_cap->pdos[0]);
+}
+
+void pd_dpm_dr_inform_source_cap(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+ pd_port_power_caps *src_cap = &pd_port->remote_src_cap;
+ char buf[1024] = { 0 };
+
+ if (pd_event_msg_match(pd_event, PD_EVT_DATA_MSG, PD_DATA_SOURCE_CAP)) {
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+ src_cap->nr = PD_HEADER_CNT(pd_msg->msg_hdr);
+ memcpy(src_cap->pdos, pd_msg->payload,
+ sizeof(u32) * src_cap->nr);
+
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SOURCE_CAP;
+ } else {
+ if (pd_event_msg_match(
+ pd_event,
+ PD_EVT_CTRL_MSG, PD_CTRL_REJECT))
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_SOURCE_CAP;
+
+ src_cap->nr = 0;
+ src_cap->pdos[0] = 0;
+ }
+
+ pd_dpm_update_pdos_flags(pd_port, src_cap->pdos[0]);
+}
+
+/*
+ * DRP : Data Role Swap
+ */
+
+void pd_dpm_drs_evaluate_swap(pd_port_t *pd_port, u8 role)
+{
+ /* TODO : Check it later */
+ pd_put_dpm_ack_event(pd_port);
+}
+
+void pd_dpm_drs_change_role(pd_port_t *pd_port, u8 role)
+{
+ pd_set_data_role(pd_port, role);
+
+ /* pd_put_dpm_ack_event(pd_port); */
+ pd_port->dpm_ack_immediately = true;
+}
+
+/*
+ * Rules:
+ * External Sources -> EXS
+ * Provider/Consumers -> PC
+ * Consumers/Provider -> CP
+ *
+ * 1. PC (with EXS) shall always deny PR_SWAP from CP (without EXS).
+ *
+ * 2. PC (without EXS) shall always accept PR_SWAP from CP (with EXS),
+ *    unless the requester isn't able to provide PDOs.
+ */
+
+void pd_dpm_prs_evaluate_swap(pd_port_t *pd_port, u8 role)
+{
+ int good_power;
+ bool accept = true;
+ bool sink, check_src, check_snk, check_ext;
+
+ check_src = (pd_port->dpm_caps & DPM_CAP_PR_SWAP_CHECK_GP_SRC) ? 1 : 0;
+ check_snk = (pd_port->dpm_caps & DPM_CAP_PR_SWAP_CHECK_GP_SNK) ? 1 : 0;
+ check_ext = (pd_port->dpm_flags & DPM_FLAGS_CHECK_EXT_POWER) ? 1 : 0;
+
+ if (check_src | check_snk | check_ext) {
+ sink = pd_port->power_role == PD_ROLE_SINK;
+ good_power = dpm_check_good_power(pd_port);
+
+ switch (good_power) {
+ case GOOD_PW_PARTNER:
+ if (sink && check_snk)
+ accept = false;
+ break;
+
+ case GOOD_PW_LOCAL:
+ if ((!sink) && (check_src || check_ext))
+ accept = false;
+ break;
+
+ case GOOD_PW_NONE:
+ accept = true;
+ break;
+
+ default:
+ accept = true;
+ break;
+ }
+ }
+
+ dpm_response_request(pd_port, accept);
+}
+
+void pd_dpm_prs_turn_off_power_sink(pd_port_t *pd_port)
+{
+ tcpci_sink_vbus(
+ pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_PR_SWAP, TCPC_VBUS_SINK_0V, 0);
+}
+
+void pd_dpm_prs_enable_power_source(pd_port_t *pd_port, bool en)
+{
+ int vbus_level = en ? TCPC_VBUS_SOURCE_5V : TCPC_VBUS_SOURCE_0V;
+
+ tcpci_source_vbus(
+ pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_PR_SWAP, vbus_level, -1);
+
+ if (en)
+ pd_enable_vbus_valid_detection(pd_port, en);
+ else
+ pd_enable_vbus_safe0v_detection(pd_port);
+}
+
+void pd_dpm_prs_change_role(pd_port_t *pd_port, u8 role)
+{
+ pd_set_power_role(pd_port, role);
+ pd_put_dpm_ack_event(pd_port);
+}
+
+/*
+ * DRP : Vconn Swap
+ */
+
+void pd_dpm_vcs_evaluate_swap(pd_port_t *pd_port)
+{
+ bool accept = true;
+
+ dpm_response_request(pd_port, accept);
+}
+
+void pd_dpm_vcs_enable_vconn(pd_port_t *pd_port, bool en)
+{
+ pd_dpm_enable_vconn(pd_port, en);
+
+	/*
+	 * TODO: If we can't enable vconn immediately,
+	 * then after vconn_on, the Vconn Controller
+	 * should call pd_put_dpm_ack_event().
+	 */
+
+ pd_port->dpm_ack_immediately = true;
+}
+
+/*
+ * PE : Notify DPM
+ */
+
+static inline int pd_dpm_ready_get_sink_cap(pd_port_t *pd_port)
+{
+ if (!(pd_port->dpm_flags & DPM_FLAGS_CHECK_SINK_CAP))
+ return 0;
+
+ if (pd_port->get_snk_cap_count >= PD_GET_SNK_CAP_RETRIES)
+ return 0;
+
+ pd_port->get_snk_cap_count++;
+ pd_put_dpm_pd_request_event(
+ pd_port, PD_DPM_PD_REQUEST_GET_SINK_CAP);
+
+ return 1;
+}
+
+static inline int pd_dpm_ready_get_source_cap(pd_port_t *pd_port)
+{
+ if (!(pd_port->dpm_flags & DPM_FLAGS_CHECK_SOURCE_CAP))
+ return 0;
+
+ if (pd_port->get_src_cap_count >= PD_GET_SRC_CAP_RETRIES)
+ return 0;
+
+ pd_port->get_src_cap_count++;
+ pd_put_dpm_pd_request_event(
+ pd_port, PD_DPM_PD_REQUEST_GET_SOURCE_CAP);
+
+ return 1;
+}
+
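+/*
+ * If neither remote capability list is known yet and the external-power
+ * check is still pending, ask the partner for its Sink Capabilities so
+ * the externally-powered bit can be evaluated.
+ */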
+static inline int pd_dpm_ready_attempt_get_extbit(pd_port_t *pd_port)
+{
+ if (pd_port->remote_src_cap.nr >= 1)
+ return 0;
+
+ if (pd_port->remote_snk_cap.nr >= 1)
+ return 0;
+
+ if (!(pd_port->dpm_flags & DPM_FLAGS_CHECK_EXT_POWER))
+ return 0;
+
+ if (pd_port->get_snk_cap_count >= PD_GET_SNK_CAP_RETRIES)
+ return 0;
+
+ pd_port->get_snk_cap_count++;
+ pd_put_dpm_pd_request_event(
+ pd_port, PD_DPM_PD_REQUEST_GET_SINK_CAP);
+ return 1;
+}
+
+static inline int pd_dpm_notify_pe_src_ready(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ return pd_dpm_ready_attempt_get_extbit(pd_port);
+}
+
+static inline int pd_dpm_notify_pe_dfp_ready(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ if (pd_port->dpm_flags & DPM_FLAGS_CHECK_CABLE_ID_DFP) {
+ if (pd_is_auto_discover_cable_id(pd_port)) {
+ if (!pd_port->vconn_source) {
+ pd_port->vconn_return = true;
+ pd_put_dpm_pd_request_event(
+ pd_port,
+ PD_DPM_PD_REQUEST_VCONN_SWAP);
+ return 1;
+ }
+
+ pd_restart_timer(pd_port, PD_TIMER_DISCOVER_ID);
+ return 1;
+ }
+ }
+
+ if (pd_port->vconn_return) {
+ DPM_DBG("VconnReturn\r\n");
+ pd_port->vconn_return = false;
+ if (pd_port->vconn_source) {
+ pd_put_dpm_pd_request_event(
+ pd_port,
+ PD_DPM_PD_REQUEST_VCONN_SWAP);
+ return 1;
+ }
+ }
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+
+#ifdef CONFIG_USB_PD_ATTEMP_DISCOVER_ID
+ if (pd_port->dpm_flags & DPM_FLAGS_CHECK_UFP_ID) {
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_UFP_ID;
+ if (vdm_put_dpm_vdm_request_event(
+ pd_port, PD_DPM_VDM_REQUEST_DISCOVER_ID))
+ return 1;
+ }
+#endif /* CONFIG_USB_PD_ATTEMP_DISCOVER_ID */
+
+#ifdef CONFIG_USB_PD_ATTEMP_DISCOVER_SVID
+ if (pd_port->dpm_flags & DPM_FLAGS_CHECK_UFP_SVID) {
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_UFP_SVID;
+ if (vdm_put_dpm_vdm_request_event(
+ pd_port, PD_DPM_VDM_REQUEST_DISCOVER_SVIDS))
+ return 1;
+ }
+#endif /* CONFIG_USB_PD_ATTEMP_DISCOVER_SVID */
+
+#ifdef CONFIG_USB_PD_MODE_OPERATION
+ if (svdm_notify_pe_ready(pd_port, pd_event))
+ return 1;
+#endif /* CONFIG_USB_PD_MODE_OPERATION */
+
+ return 0;
+}
+
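+/*
+ * Derive the initial dpm_flags (power/data role checks, external-power
+ * check, cable and UFP discovery) from the configured dpm_caps before
+ * the policy engine starts.
+ */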
+int pd_dpm_notify_pe_startup(pd_port_t *pd_port)
+{
+ u32 caps, flags = 0;
+
+ caps = DPM_CAP_EXTRACT_PR_CHECK(pd_port->dpm_caps);
+ if (caps != DPM_CAP_PR_CHECK_DISABLE)
+ flags |= DPM_FLAGS_CHECK_PR_ROLE;
+
+ caps = DPM_CAP_EXTRACT_DR_CHECK(pd_port->dpm_caps);
+ if (caps != DPM_CAP_DR_CHECK_DISABLE)
+ flags |= DPM_FLAGS_CHECK_DR_ROLE;
+
+ if (pd_port->dpm_caps & DPM_CAP_PR_SWAP_CHECK_GP_SRC)
+ flags |= DPM_FLAGS_CHECK_EXT_POWER;
+
+ if (pd_port->dpm_caps & DPM_CAP_PR_SWAP_CHECK_GP_SNK)
+ flags |= DPM_FLAGS_CHECK_EXT_POWER;
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_EXT_POWER)
+ flags |= DPM_FLAGS_CHECK_EXT_POWER;
+
+ if (pd_port->dpm_caps & DPM_CAP_ATTEMP_DISCOVER_CABLE)
+ flags |= DPM_FLAGS_CHECK_CABLE_ID;
+
+ if (pd_port->dpm_caps & DPM_CAP_ATTEMP_DISCOVER_CABLE_DFP)
+ flags |= DPM_FLAGS_CHECK_CABLE_ID_DFP;
+
+ if (pd_port->dpm_caps & DPM_CAP_ATTEMP_DISCOVER_ID)
+ flags |= DPM_FLAGS_CHECK_UFP_ID;
+
+ pd_port->dpm_flags = flags;
+ pd_port->dpm_dfp_retry_cnt = 2;
+
+ svdm_notify_pe_startup(pd_port);
+ return 0;
+}
+
+int pd_dpm_notify_pe_hardreset(pd_port_t *pd_port)
+{
+ u32 flags = 0;
+
+ if (pd_port->dpm_dfp_retry_cnt) {
+ pd_port->dpm_dfp_retry_cnt--;
+ pd_port->dpm_flags |= flags;
+ svdm_notify_pe_startup(pd_port);
+ }
+
+ return 0;
+}
+
+int pd_dpm_notify_pe_ready(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ int ret = 0;
+
+ if (pd_dpm_ready_get_source_cap(pd_port))
+ return 1;
+
+ if (pd_dpm_ready_get_sink_cap(pd_port))
+ return 1;
+
+ if (pd_port->power_role == PD_ROLE_SOURCE)
+ ret = pd_dpm_notify_pe_src_ready(pd_port, pd_event);
+
+ if (ret != 0)
+ return ret;
+
+ if (pd_port->data_role == PD_ROLE_DFP)
+ ret = pd_dpm_notify_pe_dfp_ready(pd_port, pd_event);
+
+ if (ret != 0)
+ return ret;
+
+ if (!pd_port->pe_ready) {
+ pd_port->pe_ready = true;
+ pd_update_connect_state(pd_port, PD_CONNECT_PE_READY);
+ }
+
+ return 0;
+}
diff --git a/drivers/usb/pd/richtek/pd_dpm_prv.h b/drivers/usb/pd/richtek/pd_dpm_prv.h
new file mode 100644
index 000000000000..67976c4bcd42
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_dpm_prv.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PD_DPM_PRV_H_INCLUDED
+#define PD_DPM_PRV_H_INCLUDED
+
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+typedef struct __eval_snk_request_result {
+ int src_sel;
+ int snk_sel;
+} eval_snk_request_result_t;
+
+#define SVID_DATA_LOCAL_MODE(svid_data, n) \
+ ((svid_data)->local_mode.mode_vdo[n])
+
+#define SVID_DATA_REMOTE_MODE(svid_data, n) \
+ ((svid_data)->remote_mode.mode_vdo[n])
+
+#define SVID_DATA_DFP_GET_ACTIVE_MODE(svid_data)\
+ SVID_DATA_REMOTE_MODE(svid_data, svid_data->active_mode - 1)
+
+#define SVID_DATA_UFP_GET_ACTIVE_MODE(svid_data)\
+ SVID_DATA_LOCAL_MODE(svid_data, svid_data->active_mode - 1)
+
+bool eval_snk_cap_request(
+ const pd_port_power_caps *snk_caps,
+ const pd_port_power_caps *src_caps,
+ int strategy,
+ eval_snk_request_result_t *result);
+
+enum pd_ufp_u_state {
+ DP_UFP_U_NONE = 0,
+ DP_UFP_U_STARTUP,
+ DP_UFP_U_WAIT,
+ DP_UFP_U_OPERATION,
+ DP_UFP_U_STATE_NR,
+
+	DP_UFP_U_ERR = 0x10,
+
+ DP_DFP_U_ERR_DP_CONNECTED,
+};
+
+typedef struct __pd_mode_prop {
+ const char *name;
+ u32 svid;
+ void (*request_enter_mode)(pd_port_t *pd_port);
+ void (*request_exit_mode)(pd_port_t *pd_port);
+ bool (*dfp_inform_id)(
+ pd_port_t *pd_port,
+ pd_event_t *pd_event, bool ack);
+ bool (*dfp_inform_svids)(pd_port_t *pd_port, bool ack);
+ bool (*dfp_inform_modes)(pd_port_t *pd_port, bool ack);
+ bool (*dfp_inform_enter_mode)(pd_port_t *pd_port, bool ack);
+ bool (*dfp_inform_exit_mode)(pd_port_t *pd_port, u16 svid);
+ bool (*dfp_inform_attention)(pd_port_t *pd_port, pd_event_t *pd_event);
+ bool (*notify_pe_dfp_ready)(pd_port_t *pd_port, pd_event_t *pd_event);
+ void (*reset_state)(pd_port_t *pd_port);
+} pd_mode_prop_t;
+
+typedef struct __svdm_svid_ops {
+ const char *name;
+ u16 svid;
+
+ bool (*dfp_inform_id)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data,
+ pd_event_t *pd_event, bool ack);
+ bool (*dfp_inform_svids)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, bool ack);
+ bool (*dfp_inform_modes)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, bool ack);
+
+ bool (*dfp_inform_enter_mode)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, u8 ops, bool ack);
+ bool (*dfp_inform_exit_mode)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, u8 ops);
+
+ bool (*dfp_inform_attention)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, pd_event_t *pd_event);
+
+ void (*ufp_request_enter_mode)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, u8 ops);
+ void (*ufp_request_exit_mode)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, u8 ops);
+
+ bool (*notify_pe_startup)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data);
+ int (*notify_pe_ready)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data, pd_event_t *pd_event);
+
+ bool (*reset_state)(
+ pd_port_t *pd_port,
+ svdm_svid_data_t *svid_data);
+} svdm_svid_ops_t;
+
+static inline svdm_svid_data_t *
+ dpm_get_svdm_svid_data(pd_port_t *pd_port, u16 svid)
+{
+ u8 i;
+ svdm_svid_data_t *svid_data;
+
+ if (!(pd_port->id_vdos[0] & PD_IDH_MODAL_SUPPORT))
+ return NULL;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->svid == svid)
+ return svid_data;
+ }
+
+ return NULL;
+}
+
+static inline void dpm_vdm_get_svid_ops(
+ pd_event_t *pd_event, u16 *svid, u8 *ops)
+{
+ u32 vdm_hdr;
+ char buf[1024] = { 0 };
+
+ if (!pd_event->pd_msg)
+ snprintf(buf, sizeof(buf), "the pd msg is NULL\n");
+ vdm_hdr = pd_event->pd_msg->payload[0];
+ if (svid)
+ *svid = PD_VDO_VID(vdm_hdr);
+ if (ops)
+ *ops = PD_VDO_OPOS(vdm_hdr);
+}
+
+static inline bool dpm_register_svdm_ops(
+ pd_port_t *pd_port, const svdm_svid_ops_t *ops)
+{
+ svdm_svid_data_t *svid_data =
+ dpm_get_svdm_svid_data(pd_port, ops->svid);
+ if (!svid_data)
+ return false;
+
+ svid_data->ops = ops;
+ return true;
+}
+
+static inline bool svdm_notify_pe_startup(pd_port_t *pd_port)
+{
+ int i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->ops && svid_data->ops->notify_pe_startup)
+ svid_data->ops->notify_pe_startup(pd_port, svid_data);
+ }
+
+ return true;
+}
+
+static inline int svdm_notify_pe_ready(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ int i, ret;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->ops && svid_data->ops->notify_pe_ready) {
+ ret = svid_data->ops->notify_pe_ready(
+ pd_port, svid_data, pd_event);
+
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static inline bool svdm_reset_state(pd_port_t *pd_port)
+{
+ int i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->ops && svid_data->ops->reset_state)
+ svid_data->ops->reset_state(pd_port, svid_data);
+ }
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_id(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack)
+{
+ int i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->ops && svid_data->ops->dfp_inform_id)
+ svid_data->ops->dfp_inform_id(
+ pd_port, svid_data, pd_event, ack);
+ }
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_svids(pd_port_t *pd_port, bool ack)
+{
+ int i;
+ svdm_svid_data_t *svid_data;
+
+ for (i = 0; i < pd_port->svid_data_cnt; i++) {
+ svid_data = &pd_port->svid_data[i];
+ if (svid_data->ops && svid_data->ops->dfp_inform_svids)
+ svid_data->ops->dfp_inform_svids(
+ pd_port, svid_data, ack);
+ }
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_modes(
+ pd_port_t *pd_port, u16 svid, bool ack)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->dfp_inform_modes)
+ svid_data->ops->dfp_inform_modes(pd_port, svid_data, ack);
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_enter_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops, bool ack)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->dfp_inform_enter_mode)
+ svid_data->ops->dfp_inform_enter_mode(
+ pd_port, svid_data, ops, ack);
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_exit_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->dfp_inform_exit_mode)
+ svid_data->ops->dfp_inform_exit_mode(pd_port, svid_data, ops);
+
+ return true;
+}
+
+static inline bool svdm_dfp_inform_attention(
+ pd_port_t *pd_port, u16 svid, pd_event_t *pd_event)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->dfp_inform_attention)
+ svid_data->ops->dfp_inform_attention(
+ pd_port, svid_data, pd_event);
+
+ return true;
+}
+
+static inline bool svdm_ufp_request_enter_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->ufp_request_enter_mode)
+ svid_data->ops->ufp_request_enter_mode(pd_port, svid_data, ops);
+
+ return true;
+}
+
+static inline bool svdm_ufp_request_exit_mode(
+ pd_port_t *pd_port, u16 svid, u8 ops)
+{
+ svdm_svid_data_t *svid_data;
+
+ svid_data = dpm_get_svdm_svid_data(pd_port, svid);
+ if (!svid_data)
+ return false;
+
+ if (svid_data->ops && svid_data->ops->ufp_request_exit_mode)
+ svid_data->ops->ufp_request_exit_mode(pd_port, svid_data, ops);
+
+ return true;
+}
+
+#endif /* PD_DPM_PRV_H_INCLUDED */
diff --git a/drivers/usb/pd/richtek/pd_policy_engine.c b/drivers/usb/pd/richtek/pd_policy_engine.c
new file mode 100644
index 000000000000..ceded2cbd665
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+/* ---- Policy Engine State ---- */
+
+#if PE_STATE_FULL_NAME
+
+static const char *const pe_state_name[] = {
+ "PE_SRC_STARTUP",
+ "PE_SRC_DISCOVERY",
+ "PE_SRC_SEND_CAPABILITIES",
+ "PE_SRC_NEGOTIATE_CAPABILITIES",
+ "PE_SRC_TRANSITION_SUPPLY",
+ "PE_SRC_TRANSITION_SUPPLY2",
+ "PE_SRC_READY",
+ "PE_SRC_DISABLED",
+ "PE_SRC_CAPABILITY_RESPONSE",
+ "PE_SRC_HARD_RESET",
+ "PE_SRC_HARD_RESET_RECEIVED",
+ "PE_SRC_TRANSITION_TO_DEFAULT",
+ "PE_SRC_GIVE_SOURCE_CAP",
+ "PE_SRC_GET_SINK_CAP",
+ "PE_SRC_WAIT_NEW_CAPABILITIES",
+
+ "PE_SRC_SEND_SOFT_RESET",
+ "PE_SRC_SOFT_RESET",
+ "PE_SRC_PING",
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ "PE_SRC_VDM_IDENTITY_REQUEST",
+ "PE_SRC_VDM_IDENTITY_ACKED",
+ "PE_SRC_VDM_IDENTITY_NAKED",
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+ "PE_SNK_STARTUP",
+ "PE_SNK_DISCOVERY",
+ "PE_SNK_WAIT_FOR_CAPABILITIES",
+ "PE_SNK_EVALUATE_CAPABILITY",
+ "PE_SNK_SELECT_CAPABILITY",
+ "PE_SNK_TRANSITION_SINK",
+ "PE_SNK_READY",
+ "PE_SNK_HARD_RESET",
+ "PE_SNK_TRANSITION_TO_DEFAULT",
+ "PE_SNK_GIVE_SINK_CAP",
+ "PE_SNK_GET_SOURCE_CAP",
+
+ "PE_SNK_SEND_SOFT_RESET",
+ "PE_SNK_SOFT_RESET",
+
+ "PE_DRS_DFP_UFP_EVALUATE_DR_SWAP",
+ "PE_DRS_DFP_UFP_ACCEPT_DR_SWAP",
+ "PE_DRS_DFP_UFP_CHANGE_TO_UFP",
+ "PE_DRS_DFP_UFP_SEND_DR_SWAP",
+ "PE_DRS_DFP_UFP_REJECT_DR_SWAP",
+
+ "PE_DRS_UFP_DFP_EVALUATE_DR_SWAP",
+ "PE_DRS_UFP_DFP_ACCEPT_DR_SWAP",
+ "PE_DRS_UFP_DFP_CHANGE_TO_DFP",
+ "PE_DRS_UFP_DFP_SEND_SWAP",
+ "PE_DRS_UFP_DFP_REJECT_DR_SWAP",
+
+ "PE_PRS_SRC_SNK_EVALUATE_PR_SWAP",
+ "PE_PRS_SRC_SNK_ACCEPT_PR_SWAP",
+ "PE_PRS_SRC_SNK_TRANSITION_TO_OFF",
+ "PE_PRS_SRC_SNK_ASSERT_RD",
+ "PE_PRS_SRC_SNK_WAIT_SOURCE_ON",
+ "PE_PRS_SRC_SNK_SEND_SWAP",
+ "PE_PRS_SRC_SNK_REJECT_PR_SWAP",
+
+ "PE_PRS_SNK_SRC_EVALUATE_PR_SWAP",
+ "PE_PRS_SNK_SRC_ACCEPT_PR_SWAP",
+ "PE_PRS_SNK_SRC_TRANSITION_TO_OFF",
+ "PE_PRS_SNK_SRC_ASSERT_RP",
+ "PE_PRS_SNK_SRC_SOURCE_ON",
+ "PE_PRS_SNK_SRC_SEND_PR_SWAP",
+ "PE_PRS_SNK_SRC_REJECT_SWAP",
+
+ "PE_DR_SRC_GET_SOURCE_CAP",
+
+ "PE_DR_SRC_GIVE_SINK_CAP",
+
+ "PE_DR_SNK_GET_SINK_CAP",
+
+ "PE_DR_SNK_GIVE_SOURCE_CAP",
+
+ "PE_VCS_SEND_SWAP",
+ "PE_VCS_EVALUATE_SWAP",
+ "PE_VCS_ACCEPT_SWAP",
+ "PE_VCS_REJECT_SWAP",
+ "PE_VCS_WAIT_FOR_VCONN",
+ "PE_VCS_TURN_OFF_VCONN",
+ "PE_VCS_TURN_ON_VCONN",
+ "PE_VCS_SEND_PS_RDY",
+
+ "PE_UFP_VDM_GET_IDENTITY",
+ "PE_UFP_VDM_SEND_IDENTITY",
+ "PE_UFP_VDM_GET_IDENTITY_NAK",
+
+ "PE_UFP_VDM_GET_SVIDS",
+ "PE_UFP_VDM_SEND_SVIDS",
+ "PE_UFP_VDM_GET_SVIDS_NAK",
+
+ "PE_UFP_VDM_GET_MODES",
+ "PE_UFP_VDM_SEND_MODES",
+ "PE_UFP_VDM_GET_MODES_NAK",
+
+ "PE_UFP_VDM_EVALUATE_MODE_ENTRY",
+ "PE_UFP_VDM_MODE_ENTRY_ACK",
+ "PE_UFP_VDM_MODE_ENTRY_NAK",
+
+ "PE_UFP_VDM_MODE_EXIT",
+ "PE_UFP_VDM_MODE_EXIT_ACK",
+ "PE_UFP_VDM_MODE_EXIT_NAK",
+
+ "PE_UFP_VDM_ATTENTION_REQUEST",
+
+ "PE_DFP_UFP_VDM_IDENTITY_REQUEST",
+ "PE_DFP_UFP_VDM_IDENTITY_ACKED",
+ "PE_DFP_UFP_VDM_IDENTITY_NAKED",
+
+ "PE_DFP_CBL_VDM_IDENTITY_REQUEST",
+ "PE_DFP_CBL_VDM_IDENTITY_ACKED",
+ "PE_DFP_CBL_VDM_IDENTITY_NAKED",
+
+ "PE_DFP_VDM_SVIDS_REQUEST",
+ "PE_DFP_VDM_SVIDS_ACKED",
+ "PE_DFP_VDM_SVIDS_NAKED",
+
+ "PE_DFP_VDM_MODES_REQUEST",
+ "PE_DFP_VDM_MODES_ACKED",
+ "PE_DFP_VDM_MODES_NAKED",
+
+ "PE_DFP_VDM_MODE_ENTRY_REQUEST",
+ "PE_DFP_VDM_MODE_ENTRY_ACKED",
+ "PE_DFP_VDM_MODE_ENTRY_NAKED",
+
+ "PE_DFP_VDM_MODE_EXIT_REQUEST",
+ "PE_DFP_VDM_MODE_EXIT_ACKED",
+
+ "PE_DFP_VDM_ATTENTION_REQUEST",
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ "PE_DBG_READY",
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ "PE_BIST_TEST_DATA",
+ "PE_BIST_CARRIER_MODE_2",
+
+ "PE_IDLE1",
+ "PE_IDLE2",
+
+ "PE_VIRT_HARD_RESET",
+ "PE_VIRT_READY",
+};
+#else
+
+static const char *const pe_state_name[] = {
+ "SRC_START",
+ "SRC_DISCOVERY",
+ "SRC_SEND_CAP",
+ "SRC_NEG_CAP",
+ "SRC_TRANS_SUPPLY",
+ "SRC_TRANS_SUPPLY2",
+ "SRC_READY",
+ "SRC_DISABLED",
+ "SRC_CAP_RESP",
+ "SRC_HRESET",
+ "SRC_HRESET_RECV",
+ "SRC_TRANS_DFT",
+ "SRC_GIVE_CAP",
+ "SRC_GET_CAP",
+ "SRC_WAIT_CAP",
+
+ "SRC_SEND_SRESET",
+ "SRC_SRESET",
+ "SRC_PING",
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ "SRC_VDM_ID_REQ",
+ "SRC_VDM_ID_ACK",
+ "SRC_VDM_ID_NAK",
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+ "SNK_START",
+ "SNK_DISCOVERY",
+ "SNK_WAIT_CAP",
+ "SNK_EVA_CAP",
+ "SNK_SEL_CAP",
+ "SNK_TRANS_SINK",
+ "SNK_READY",
+ "SNK_HRESET",
+ "SNK_TRANS_DFT",
+ "SNK_GIVE_CAP",
+ "SNK_GET_CAP",
+
+ "SNK_SEND_SRESET",
+ "SNK_SRESET",
+
+ "D_DFP_EVALUATE",
+ "D_DFP_ACCEPT",
+ "D_DFP_CHANGE",
+ "D_DFP_SEND",
+ "D_DFP_REJECT",
+
+ "D_UFP_EVALUATE",
+ "D_UFP_ACCEPT",
+ "D_UFP_CHANGE",
+ "D_UFP_SEND",
+ "D_UFP_REJECT",
+
+ "P_SRC_EVALUATE",
+ "P_SRC_ACCEPT",
+ "P_SRC_TRANS_OFF",
+ "P_SRC_ASSERT",
+ "P_SRC_WAIT_ON",
+ "P_SRC_SEND",
+ "P_SRC_REJECT",
+
+ "P_SNK_EVALUATE",
+ "P_SNK_ACCEPT",
+ "P_SNK_TRANS_OFF",
+ "P_SNK_ASSERT",
+ "P_SNK_SOURCE_ON",
+ "P_SNK_SEND",
+ "P_SNK_REJECT",
+
+ "DR_SRC_GET_CAP", /* get source cap */
+ "DR_SRC_GIVE_CAP", /* give sink cap */
+ "DR_SNK_GET_CAP", /* get sink cap */
+ "DR_SNK_GIVE_CAP", /* give source cap */
+
+ "V_SEND",
+ "V_EVALUATE",
+ "V_ACCEPT",
+ "V_REJECT",
+ "V_WAIT_VCONN",
+ "V_TURN_OFF",
+ "V_TURN_ON",
+ "V_PS_RDY",
+
+ "U_GET_ID",
+ "U_SEND_ID",
+ "U_GET_ID_N",
+
+ "U_GET_SVID",
+ "U_SEND_SVID",
+ "U_GET_SVID_N",
+
+ "U_GET_MODE",
+ "U_SEND_MODE",
+ "U_GET_MODE_N",
+
+ "U_EVA_MODE",
+ "U_MODE_EN_A",
+ "U_MODE_EN_N",
+
+ "U_MODE_EX",
+ "U_MODE_EX_A",
+ "U_MODE_EX_N",
+
+ "U_ATTENTION",
+
+ "D_UID_REQ",
+ "D_UID_A",
+ "D_UID_N",
+
+ "D_CID_REQ",
+ "D_CID_ACK",
+ "D_CID_NAK",
+
+ "D_SVID_REQ",
+ "D_SVID_ACK",
+ "D_SVID_NAK",
+
+ "D_MODE_REQ",
+ "D_MODE_ACK",
+ "D_MODE_NAK",
+
+ "D_MODE_EN_REQ",
+ "D_MODE_EN_ACK",
+ "D_MODE_EN_NAK",
+
+ "D_MODE_EX_REQ",
+ "D_MODE_EX_ACK",
+
+ "D_ATTENTION",
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ "DBG_READY",
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ "ERR_RECOVERY",
+
+ "BIST_TD",
+ "BIST_C2",
+
+ "IDLE1",
+ "IDLE2",
+
+ "VIRT_HARD_RESET",
+ "VIRT_READY",
+};
+
+#endif
+
+typedef void (*pe_state_action_fcn_t)
+ (pd_port_t *pd_port, pd_event_t *pd_event);
+
+typedef struct __pe_state_actions {
+ const pe_state_action_fcn_t entry_action;
+ /* const pd_pe_state_action_fcn_t exit_action; */
+} pe_state_actions_t;
+
+#define PE_STATE_ACTIONS(state) { .entry_action = state##_entry, }
+
+/*
+ * Policy Engine General State Activity
+ */
+
+/* extern int rt1711_set_bist_carrier_mode( */
+/* struct tcpc_device *tcpc_dev, uint8_t pattern); */
+static void pe_idle_reset_data(pd_port_t *pd_port)
+{
+ pd_reset_pe_timer(pd_port);
+ pd_reset_svid_data(pd_port);
+
+ pd_port->pd_prev_connected = false;
+ pd_port->state_machine = PE_STATE_MACHINE_IDLE;
+
+ switch (pd_port->pe_state_curr) {
+ case PE_BIST_TEST_DATA:
+ pd_enable_bist_test_mode(pd_port, false);
+ break;
+
+ case PE_BIST_CARRIER_MODE_2:
+ pd_disable_bist_mode2(pd_port);
+ break;
+ }
+
+ pd_unlock_msg_output(pd_port);
+}
+
+static void pe_idle1_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pe_idle_reset_data(pd_port);
+
+ pd_try_put_pe_idle_event(pd_port);
+}
+
+static void pe_idle2_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_IDLE);
+ pd_notify_pe_idle(pd_port);
+}
+
+void pe_error_recovery_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pe_idle_reset_data(pd_port);
+
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_IDLE);
+ pd_notify_pe_error_recovery(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_bist_test_data_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_bist_test_mode(pd_port, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_bist_test_data_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_bist_test_mode(pd_port, false);
+}
+
+void pe_bist_carrier_mode_2_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_bist_mode2(pd_port);
+ pd_enable_timer(pd_port, PD_TIMER_BIST_CONT_MODE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_bist_carrier_mode_2_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_BIST_CONT_MODE);
+ pd_disable_bist_mode2(pd_port);
+}
+
+/*
+ * Policy Engine Share State Activity
+ */
+
+void pe_power_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->during_swap = false;
+ pd_port->explicit_contract = true;
+
+ if (pd_port->data_role == PD_ROLE_UFP)
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_READY_UFP);
+ else
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_READY_DFP);
+
+ pd_dpm_notify_pe_ready(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+static const pe_state_actions_t pe_state_actions[] = {
+ /* src activity */
+ PE_STATE_ACTIONS(pe_src_startup),
+ PE_STATE_ACTIONS(pe_src_discovery),
+ PE_STATE_ACTIONS(pe_src_send_capabilities),
+ PE_STATE_ACTIONS(pe_src_negotiate_capabilities),
+ PE_STATE_ACTIONS(pe_src_transition_supply),
+ PE_STATE_ACTIONS(pe_src_transition_supply2),
+ PE_STATE_ACTIONS(pe_src_ready),
+ PE_STATE_ACTIONS(pe_src_disabled),
+ PE_STATE_ACTIONS(pe_src_capability_response),
+ PE_STATE_ACTIONS(pe_src_hard_reset),
+ PE_STATE_ACTIONS(pe_src_hard_reset_received),
+ PE_STATE_ACTIONS(pe_src_transition_to_default),
+ PE_STATE_ACTIONS(pe_src_give_source_cap),
+ PE_STATE_ACTIONS(pe_src_get_sink_cap),
+ PE_STATE_ACTIONS(pe_src_wait_new_capabilities),
+
+ PE_STATE_ACTIONS(pe_src_send_soft_reset),
+ PE_STATE_ACTIONS(pe_src_soft_reset),
+ PE_STATE_ACTIONS(pe_src_ping),
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ PE_STATE_ACTIONS(pe_src_vdm_identity_request),
+ PE_STATE_ACTIONS(pe_src_vdm_identity_acked),
+ PE_STATE_ACTIONS(pe_src_vdm_identity_naked),
+#endif
+
+ /* snk activity */
+ PE_STATE_ACTIONS(pe_snk_startup),
+ PE_STATE_ACTIONS(pe_snk_discovery),
+ PE_STATE_ACTIONS(pe_snk_wait_for_capabilities),
+ PE_STATE_ACTIONS(pe_snk_evaluate_capability),
+ PE_STATE_ACTIONS(pe_snk_select_capability),
+ PE_STATE_ACTIONS(pe_snk_transition_sink),
+ PE_STATE_ACTIONS(pe_snk_ready),
+ PE_STATE_ACTIONS(pe_snk_hard_reset),
+ PE_STATE_ACTIONS(pe_snk_transition_to_default),
+ PE_STATE_ACTIONS(pe_snk_give_sink_cap),
+ PE_STATE_ACTIONS(pe_snk_get_source_cap),
+
+ PE_STATE_ACTIONS(pe_snk_send_soft_reset),
+ PE_STATE_ACTIONS(pe_snk_soft_reset),
+
+ /* drs dfp activity */
+ PE_STATE_ACTIONS(pe_drs_dfp_ufp_evaluate_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_dfp_ufp_accept_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_dfp_ufp_change_to_ufp),
+ PE_STATE_ACTIONS(pe_drs_dfp_ufp_send_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_dfp_ufp_reject_dr_swap),
+
+ /* drs ufp activity */
+ PE_STATE_ACTIONS(pe_drs_ufp_dfp_evaluate_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_ufp_dfp_accept_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_ufp_dfp_change_to_dfp),
+ PE_STATE_ACTIONS(pe_drs_ufp_dfp_send_dr_swap),
+ PE_STATE_ACTIONS(pe_drs_ufp_dfp_reject_dr_swap),
+
+ /* prs src activity */
+ PE_STATE_ACTIONS(pe_prs_src_snk_evaluate_pr_swap),
+ PE_STATE_ACTIONS(pe_prs_src_snk_accept_pr_swap),
+ PE_STATE_ACTIONS(pe_prs_src_snk_transition_to_off),
+ PE_STATE_ACTIONS(pe_prs_src_snk_assert_rd),
+ PE_STATE_ACTIONS(pe_prs_src_snk_wait_source_on),
+ PE_STATE_ACTIONS(pe_prs_src_snk_send_swap),
+ PE_STATE_ACTIONS(pe_prs_src_snk_reject_pr_swap),
+
+ /* prs snk activity */
+ PE_STATE_ACTIONS(pe_prs_snk_src_evaluate_pr_swap),
+ PE_STATE_ACTIONS(pe_prs_snk_src_accept_pr_swap),
+ PE_STATE_ACTIONS(pe_prs_snk_src_transition_to_off),
+ PE_STATE_ACTIONS(pe_prs_snk_src_assert_rp),
+ PE_STATE_ACTIONS(pe_prs_snk_src_source_on),
+ PE_STATE_ACTIONS(pe_prs_snk_src_send_swap),
+ PE_STATE_ACTIONS(pe_prs_snk_src_reject_swap),
+
+ /* dr src activity */
+ PE_STATE_ACTIONS(pe_dr_src_get_source_cap),
+ PE_STATE_ACTIONS(pe_dr_src_give_sink_cap),
+
+ /* dr snk activity */
+ PE_STATE_ACTIONS(pe_dr_snk_get_sink_cap),
+ PE_STATE_ACTIONS(pe_dr_snk_give_source_cap),
+
+ /* vcs activity */
+ PE_STATE_ACTIONS(pe_vcs_send_swap),
+ PE_STATE_ACTIONS(pe_vcs_evaluate_swap),
+ PE_STATE_ACTIONS(pe_vcs_accept_swap),
+ PE_STATE_ACTIONS(pe_vcs_reject_vconn_swap),
+ PE_STATE_ACTIONS(pe_vcs_wait_for_vconn),
+ PE_STATE_ACTIONS(pe_vcs_turn_off_vconn),
+ PE_STATE_ACTIONS(pe_vcs_turn_on_vconn),
+ PE_STATE_ACTIONS(pe_vcs_send_ps_rdy),
+
+ /* ufp structured vdm activity */
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_identity),
+ PE_STATE_ACTIONS(pe_ufp_vdm_send_identity),
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_identity_nak),
+
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_svids),
+ PE_STATE_ACTIONS(pe_ufp_vdm_send_svids),
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_svids_nak),
+
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_modes),
+ PE_STATE_ACTIONS(pe_ufp_vdm_send_modes),
+ PE_STATE_ACTIONS(pe_ufp_vdm_get_modes_nak),
+
+ PE_STATE_ACTIONS(pe_ufp_vdm_evaluate_mode_entry),
+ PE_STATE_ACTIONS(pe_ufp_vdm_mode_entry_ack),
+ PE_STATE_ACTIONS(pe_ufp_vdm_mode_entry_nak),
+
+ PE_STATE_ACTIONS(pe_ufp_vdm_mode_exit),
+ PE_STATE_ACTIONS(pe_ufp_vdm_mode_exit_ack),
+ PE_STATE_ACTIONS(pe_ufp_vdm_mode_exit_nak),
+
+ PE_STATE_ACTIONS(pe_ufp_vdm_attention_request),
+
+ /* dfp structured vdm */
+ PE_STATE_ACTIONS(pe_dfp_ufp_vdm_identity_request),
+ PE_STATE_ACTIONS(pe_dfp_ufp_vdm_identity_acked),
+ PE_STATE_ACTIONS(pe_dfp_ufp_vdm_identity_naked),
+
+ PE_STATE_ACTIONS(pe_dfp_cbl_vdm_identity_request),
+ PE_STATE_ACTIONS(pe_dfp_cbl_vdm_identity_acked),
+ PE_STATE_ACTIONS(pe_dfp_cbl_vdm_identity_naked),
+
+ PE_STATE_ACTIONS(pe_dfp_vdm_svids_request),
+ PE_STATE_ACTIONS(pe_dfp_vdm_svids_acked),
+ PE_STATE_ACTIONS(pe_dfp_vdm_svids_naked),
+
+ PE_STATE_ACTIONS(pe_dfp_vdm_modes_request),
+ PE_STATE_ACTIONS(pe_dfp_vdm_modes_acked),
+ PE_STATE_ACTIONS(pe_dfp_vdm_modes_naked),
+
+ PE_STATE_ACTIONS(pe_dfp_vdm_mode_entry_request),
+ PE_STATE_ACTIONS(pe_dfp_vdm_mode_entry_acked),
+ PE_STATE_ACTIONS(pe_dfp_vdm_mode_entry_naked),
+
+ PE_STATE_ACTIONS(pe_dfp_vdm_mode_exit_request),
+ PE_STATE_ACTIONS(pe_dfp_vdm_mode_exit_acked),
+
+ PE_STATE_ACTIONS(pe_dfp_vdm_attention_request),
+
+ /* general activity */
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ PE_STATE_ACTIONS(pe_dbg_ready),
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+ PE_STATE_ACTIONS(pe_error_recovery),
+
+ PE_STATE_ACTIONS(pe_bist_test_data),
+ PE_STATE_ACTIONS(pe_bist_carrier_mode_2),
+
+ PE_STATE_ACTIONS(pe_idle1),
+ PE_STATE_ACTIONS(pe_idle2),
+};
+
+static void pe_exit_action_disable_sender_response(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+}
+
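+/*
+ * Look up the exit action to run when leaving @pe_state;
+ * returns NULL when the state has no exit action.
+ */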
+pe_state_action_fcn_t pe_get_exit_action(uint8_t pe_state)
+{
+ pe_state_action_fcn_t retval = NULL;
+
+ switch (pe_state) {
+ /* Source */
+ case PE_SRC_SEND_CAPABILITIES:
+ retval = pe_src_send_capabilities_exit;
+ break;
+ case PE_SRC_TRANSITION_SUPPLY:
+ retval = pe_src_transition_supply_exit;
+ break;
+ case PE_SRC_TRANSITION_TO_DEFAULT:
+ retval = pe_src_transition_to_default_exit;
+ break;
+ case PE_SRC_GET_SINK_CAP:
+ retval = pe_src_get_sink_cap_exit;
+ break;
+
+ /* Sink */
+ case PE_SNK_WAIT_FOR_CAPABILITIES:
+ retval = pe_snk_wait_for_capabilities_exit;
+ break;
+ case PE_SNK_SELECT_CAPABILITY:
+ retval = pe_snk_select_capability_exit;
+ break;
+ case PE_SNK_TRANSITION_SINK:
+ retval = pe_snk_transition_sink_exit;
+ break;
+ case PE_SNK_TRANSITION_TO_DEFAULT:
+ retval = pe_snk_transition_to_default_exit;
+ break;
+
+ case PE_DR_SRC_GET_SOURCE_CAP:
+ retval = pe_dr_src_get_source_cap_exit;
+ break;
+ case PE_DR_SNK_GET_SINK_CAP:
+ retval = pe_dr_snk_get_sink_cap_exit;
+ break;
+
+ case PE_BIST_TEST_DATA:
+ retval = pe_bist_test_data_exit;
+ break;
+
+ case PE_BIST_CARRIER_MODE_2:
+ retval = pe_bist_carrier_mode_2_exit;
+ break;
+
+ case PE_VCS_SEND_SWAP:
+ case PE_PRS_SRC_SNK_SEND_SWAP:
+ case PE_PRS_SNK_SRC_SEND_SWAP:
+ case PE_DRS_DFP_UFP_SEND_DR_SWAP:
+ case PE_DRS_UFP_DFP_SEND_DR_SWAP:
+ retval = pe_exit_action_disable_sender_response;
+ break;
+
+ case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
+ retval = pe_prs_src_snk_wait_source_on_exit;
+ break;
+
+ case PE_PRS_SNK_SRC_SOURCE_ON:
+ retval = pe_prs_snk_src_source_on_exit;
+ break;
+
+ case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+ retval = pe_prs_snk_src_transition_to_off_exit;
+ break;
+
+ case PE_VCS_WAIT_FOR_VCONN:
+ retval = pe_vcs_wait_for_vconn_exit;
+ break;
+ }
+
+ return retval;
+}
+
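+/*
+ * Perform a PE state transition: run the exit action of the old state,
+ * then the entry action of the new state, and finally record the new
+ * state in either the VDM or the PD state context.
+ */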
+static void pd_pe_state_change(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool vdm_evt)
+{
+ pe_state_action_fcn_t prev_exit_action;
+ pe_state_action_fcn_t next_entry_action;
+
+ u8 old_state = pd_port->pe_state_curr;
+ u8 new_state = pd_port->pe_state_next;
+
+	/* Guard against indexing the state tables out of range */
+	if ((old_state >= PD_NR_PE_STATES) || (new_state >= PD_NR_PE_STATES)) {
+		PE_DBG("invalid pe_state: %d -> %d\r\n", old_state, new_state);
+		return;
+	}
+
+ if ((new_state == PE_IDLE1) || (new_state == PE_IDLE2))
+ prev_exit_action = NULL;
+ else
+ prev_exit_action = pe_get_exit_action(old_state);
+
+ next_entry_action = pe_state_actions[new_state].entry_action;
+
+ /*
+ * Source (P, Provider), Sink (C, Consumer)
+ * DFP (D), UFP (U)
+ * Vconn Source (Y/N)
+ */
+
+#if PE_DBG_ENABLE
+ PE_DBG("%s -> %s (%c%c%c)\r\n",
+ vdm_evt ? "VDM" : "PD", pe_state_name[new_state],
+ pd_port->power_role ? 'P' : 'C',
+ pd_port->data_role ? 'D' : 'U',
+ pd_port->vconn_source ? 'Y' : 'N');
+#else
+ if (!vdm_evt) {
+		PE_STATE_INFO("%s -> %s\r\n",
+			vdm_evt ? "VDM" : "PD", pe_state_name[new_state]);
+ }
+#endif
+
+ if (prev_exit_action)
+ prev_exit_action(pd_port, pd_event);
+
+ if (next_entry_action)
+ next_entry_action(pd_port, pd_event);
+
+ if (vdm_evt)
+ pd_port->pe_vdm_state = new_state;
+ else
+ pd_port->pe_pd_state = new_state;
+}
+
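+/*
+ * Dispatch one event against the VDM or PD state context; if the event
+ * triggers a transition, pd_pe_state_change() runs the exit/entry actions.
+ */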
+static int pd_handle_event(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool vdm_evt)
+{
+ if (vdm_evt) {
+ if (pd_port->reset_vdm_state) {
+ pd_port->reset_vdm_state = false;
+ pd_port->pe_vdm_state = pd_port->pe_pd_state;
+ }
+
+ pd_port->pe_state_curr = pd_port->pe_vdm_state;
+ } else {
+ pd_port->pe_state_curr = pd_port->pe_pd_state;
+ }
+
+ if (pd_process_event(pd_port, pd_event, vdm_evt))
+ pd_pe_state_change(pd_port, pd_event, vdm_evt);
+ else
+ pd_free_pd_event(pd_port, pd_event);
+
+ return 1;
+}
+
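+/* Feed a synthetic DPM ACK event back into the state machine right away */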
+static inline int pd_put_dpm_ack_immediately(
+ pd_port_t *pd_port, bool vdm_evt)
+{
+ pd_event_t pd_event = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_ACK,
+ .pd_msg = NULL,
+ };
+
+ pd_handle_event(pd_port, &pd_event, vdm_evt);
+
+ PE_DBG("ACK_Immediately\r\n");
+ pd_port->dpm_ack_immediately = false;
+ return 1;
+}
+
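+/*
+ * Run one iteration of the policy engine: dequeue a pending PD event
+ * (or a VDM event when the PE is in a startup/discovery/ready state)
+ * and dispatch it under the port lock.
+ */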
+int pd_policy_engine_run(struct tcpc_device *tcpc_dev)
+{
+ bool vdm_evt = false;
+ pd_event_t pd_event;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ if (!pd_get_event(tcpc_dev, &pd_event)) {
+ switch (pd_port->pe_pd_state) {
+ case PE_SNK_READY:
+ case PE_SRC_READY:
+ case PE_SRC_STARTUP:
+ case PE_SRC_DISCOVERY:
+ vdm_evt = pd_get_vdm_event(tcpc_dev, &pd_event);
+ break;
+ }
+
+ if (!vdm_evt)
+ return 0;
+ }
+ mutex_lock(&pd_port->pd_lock);
+
+ pd_handle_event(pd_port, &pd_event, vdm_evt);
+
+ if (pd_port->dpm_ack_immediately)
+ pd_put_dpm_ack_immediately(pd_port, vdm_evt);
+
+ mutex_unlock(&pd_port->pd_lock);
+
+ return 1;
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_dbg.c b/drivers/usb/pd/richtek/pd_policy_engine_dbg.c
new file mode 100644
index 000000000000..0efc83ddcf73
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_dbg.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for DBGACC
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+
+void pe_dbg_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u8 state;
+
+ if (pd_port->pe_ready)
+ return;
+
+ pd_port->pe_ready = true;
+ pd_port->state_machine = PE_STATE_MACHINE_DBGACC;
+
+ if (pd_port->data_role == PD_ROLE_UFP) {
+ PE_INFO("Custom_DBGACC : UFP\r\n");
+ state = PD_CONNECT_PE_READY_DBGACC_UFP;
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_READY_UFP);
+ } else {
+ PE_INFO("Custom_DBGACC : DFP\r\n");
+ state = PD_CONNECT_PE_READY_DBGACC_DFP;
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_READY_DFP);
+ }
+
+ pd_reset_protocol_layer(pd_port);
+ pd_update_connect_state(pd_port, state);
+}
+
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_dfp.c b/drivers/usb/pd/richtek/pd_policy_engine_dfp.c
new file mode 100644
index 000000000000..e255cc6b83d8
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_dfp.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for DFP
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-64 DFP to UFP VDM Discover Identity State Diagram
+ */
+
+void pe_dfp_ufp_vdm_identity_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_discover_id(pd_port, TCPC_TX_SOP);
+}
+
+void pe_dfp_ufp_vdm_identity_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_id(pd_port, pd_event, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_ufp_vdm_identity_naked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_id(pd_port, pd_event, false);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-65 DFP VDM Discover Identity State Diagram
+ */
+
+void pe_dfp_cbl_vdm_identity_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_discover_id(pd_port, TCPC_TX_SOP_PRIME);
+ pd_port->discover_id_counter++;
+
+ pd_enable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_cbl_vdm_identity_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->dpm_flags &=
+ ~(DPM_FLAGS_CHECK_CABLE_ID | DPM_FLAGS_CHECK_CABLE_ID_DFP);
+
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_cable_vdo(pd_port, pd_event);
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_cbl_vdm_identity_naked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_cable_vdo(pd_port, pd_event);
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-66 DFP VDM Discover SVIDs State Diagram
+ */
+
+void pe_dfp_vdm_svids_request_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_discover_svids(pd_port, TCPC_TX_SOP);
+}
+
+void pe_dfp_vdm_svids_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_svids(pd_port, pd_event, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_vdm_svids_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_svids(pd_port, pd_event, false);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-67 DFP VDM Discover Modes State Diagram
+ */
+
+void pe_dfp_vdm_modes_request_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_discover_modes(pd_port, TCPC_TX_SOP, pd_port->mode_svid);
+}
+
+void pe_dfp_vdm_modes_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_modes(pd_port, pd_event, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_vdm_modes_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_modes(pd_port, pd_event, false);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-68 DFP VDM Mode Entry State Diagram
+ */
+
+void pe_dfp_vdm_mode_entry_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_enter_mode(pd_port, TCPC_TX_SOP,
+ pd_port->mode_svid, pd_port->mode_obj_pos);
+}
+
+void pe_dfp_vdm_mode_entry_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_enter_mode(pd_port, pd_event, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dfp_vdm_mode_entry_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_enter_mode(pd_port, pd_event, false);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-69 DFP VDM Mode Exit State Diagram
+ */
+
+void pe_dfp_vdm_mode_exit_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_vdm_exit_mode(pd_port, TCPC_TX_SOP,
+ pd_port->mode_svid, pd_port->mode_obj_pos);
+}
+
+void pe_dfp_vdm_mode_exit_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_dfp_inform_exit_mode(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-70 DFP VDM Attention State Diagram
+ */
+
+void pe_dfp_vdm_attention_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_dfp_inform_attention(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_dr.c b/drivers/usb/pd/richtek/pd_policy_engine_dr.c
new file mode 100644
index 000000000000..6efdeb4a8b50
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_dr.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for DR
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0]
+ * Figure 8-53 Dual-Role (Source) Get Source Capabilities diagram
+ * Figure 8-54 Dual-Role (Source) Give Sink Capabilities diagram
+ * Figure 8-55 Dual-Role (Sink) Get Sink Capabilities State Diagram
+ * Figure 8-56 Dual-Role (Sink) Give Source Capabilities State Diagram
+ */
+
+void pe_dr_src_get_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_GET_SOURCE_CAP);
+}
+
+void pe_dr_src_get_source_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ pd_dpm_dr_inform_source_cap(pd_port, pd_event);
+}
+
+void pe_dr_src_give_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_send_sink_caps(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_dr_snk_get_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_GET_SINK_CAP);
+}
+
+void pe_dr_snk_get_sink_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ pd_dpm_dr_inform_sink_cap(pd_port, pd_event);
+}
+
+void pe_dr_snk_give_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_send_source_caps(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_drs.c b/drivers/usb/pd/richtek/pd_policy_engine_drs.c
new file mode 100644
index 000000000000..be01eb3cf306
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_drs.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for DRS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-49: Type-C DFP to UFP Data Role Swap State Diagram
+ */
+
+void pe_drs_dfp_ufp_evaluate_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_drs_evaluate_swap(pd_port, PD_ROLE_UFP);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_drs_dfp_ufp_accept_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+void pe_drs_dfp_ufp_change_to_ufp_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_drs_change_role(pd_port, PD_ROLE_UFP);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_drs_dfp_ufp_send_dr_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_DR_SWAP);
+}
+
+void pe_drs_dfp_ufp_reject_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg_sec == PD_DPM_NAK_REJECT)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ else
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+}
+
+/*
+ * [PD2.0] Figure 8-50: Type-C UFP to DFP Data Role Swap State Diagram
+ */
+
+void pe_drs_ufp_dfp_evaluate_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_drs_evaluate_swap(pd_port, PD_ROLE_DFP);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_drs_ufp_dfp_accept_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+void pe_drs_ufp_dfp_change_to_dfp_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_drs_change_role(pd_port, PD_ROLE_DFP);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_drs_ufp_dfp_send_dr_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_DR_SWAP);
+}
+
+void pe_drs_ufp_dfp_reject_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg_sec == PD_DPM_NAK_REJECT)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ else
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_prs.c b/drivers/usb/pd/richtek/pd_policy_engine_prs.c
new file mode 100644
index 000000000000..fad45a3e1444
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_prs.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for PRS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-51:
+ * Dual-Role Port in Source to Sink Power Role Swap State Diagram
+ */
+
+void pe_prs_src_snk_evaluate_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_prs_evaluate_swap(pd_port, PD_ROLE_SINK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_prs_src_snk_accept_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_notify_pe_execute_pr_swap(pd_port, true);
+
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+void pe_prs_src_snk_transition_to_off_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_lock_msg_output(pd_port); /* for tSRCTransition */
+ pd_notify_pe_execute_pr_swap(pd_port, true);
+
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_TRANSITION);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_prs_src_snk_assert_rd_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_prs_change_role(pd_port, PD_ROLE_SINK);
+}
+
+void pe_prs_src_snk_wait_source_on_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+}
+
+void pe_prs_src_snk_wait_source_on_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_PS_SOURCE_ON);
+}
+
+void pe_prs_src_snk_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PR_SWAP);
+}
+
+void pe_prs_src_snk_reject_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg_sec == PD_DPM_NAK_REJECT)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ else
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+}
+
+/*
+ * [PD2.0] Figure 8-52:
+ * Dual-role Port in Sink to Source Power Role Swap State Diagram
+ */
+
+void pe_prs_snk_src_evaluate_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_prs_evaluate_swap(pd_port, PD_ROLE_SOURCE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_prs_snk_src_accept_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_notify_pe_execute_pr_swap(pd_port, true);
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+void pe_prs_snk_src_transition_to_off_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ /*
+	 * The Sink should call pd_notify_pe_execute_pr_swap before this state,
+	 * because the Source may turn off power and change CC before we get
+	 * GoodCRC or Accept.
+ */
+
+ pd_port->during_swap = true;
+ pd_enable_timer(pd_port, PD_TIMER_PS_SOURCE_OFF);
+ pd_dpm_prs_turn_off_power_sink(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_prs_snk_src_transition_to_off_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_PS_SOURCE_OFF);
+}
+
+void pe_prs_snk_src_assert_rp_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_prs_change_role(pd_port, PD_ROLE_SOURCE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_prs_snk_src_source_on_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_prs_enable_power_source(pd_port, true);
+}
+
+void pe_prs_snk_src_source_on_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+	/* PS_RDY is sent from process_event after the source power is on */
+	/* pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PS_RDY); */
+}
+
+void pe_prs_snk_src_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_notify_pe_execute_pr_swap(pd_port, false);
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PR_SWAP);
+}
+
+void pe_prs_snk_src_reject_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg_sec == PD_DPM_NAK_REJECT)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ else
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_snk.c b/drivers/usb/pd/richtek/pd_policy_engine_snk.c
new file mode 100644
index 000000000000..27f60340ed9d
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_snk.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for SNK
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-39 Sink Port state diagram
+ */
+
+void pe_snk_startup_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u8 rx_cap = PD_RX_CAP_PE_STARTUP;
+
+ pd_port->state_machine = PE_STATE_MACHINE_SINK;
+ pd_reset_protocol_layer(pd_port);
+
+ switch (pd_event->event_type) {
+ case PD_EVT_HW_MSG: /* CC attached */
+ pd_put_pe_event(pd_port, PD_PE_RESET_PRL_COMPLETED);
+ break;
+
+ case PD_EVT_PE_MSG: /* From Hard-Reset */
+ pd_enable_vbus_valid_detection(pd_port, false);
+ break;
+
+ case PD_EVT_CTRL_MSG: /* From PR-SWAP (Received PS_RDY) */
+		/*
+		 * If we reset rx_cap here, we may not be able to meet
+		 * tSwapSink (check it later).
+		 */
+ if (!pd_dpm_check_vbus_valid(pd_port)) {
+ PE_INFO("rx_cap_on\r\n");
+ rx_cap = PD_RX_CAP_PE_SEND_WAIT_CAP;
+ }
+
+ pd_put_pe_event(pd_port, PD_PE_RESET_PRL_COMPLETED);
+ pd_free_pd_event(pd_port, pd_event);
+ break;
+ }
+
+ pd_set_rx_enable(pd_port, rx_cap);
+}
+
+void pe_snk_discovery_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#ifdef CONFIG_USB_PD_FAST_RESP_TYPEC_SRC
+ pd_disable_timer(pd_port, PD_TIMER_SRC_RECOVER);
+#endif /* CONFIG_USB_PD_FAST_RESP_TYPEC_SRC */
+ pd_enable_vbus_valid_detection(pd_port, true);
+}
+
+void pe_snk_wait_for_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_notify_pe_hard_reset_completed(pd_port);
+
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_SEND_WAIT_CAP);
+ pd_enable_timer(pd_port, PD_TIMER_SINK_WAIT_CAP);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_wait_for_capabilities_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SINK_WAIT_CAP);
+}
+
+void pe_snk_evaluate_capability_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ /* Stop NoResponseTimer and reset HardResetCounter to zero */
+
+ pd_disable_timer(pd_port, PD_TIMER_NO_RESPONSE);
+
+ pd_port->hard_reset_counter = 0;
+ pd_port->pd_connected = 1;
+ pd_port->pd_prev_connected = 1;
+ pd_port->explicit_contract = false;
+
+ pd_dpm_snk_evaluate_caps(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_select_capability_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg == PD_DPM_NOTIFIED) {
+ PE_DBG("SelectCap%d, rdo:0x%08x\r\n",
+ pd_event->msg_sec, pd_port->last_rdo);
+ } else {
+ /* new request, for debug only */
+ /* pd_dpm_sink_vbus(pd_port, false); */
+ PE_DBG("NewReq, rdo:0x%08x\r\n", pd_port->last_rdo);
+ }
+
+ pd_lock_msg_output(pd_port); /* SenderResponse */
+ pd_send_data_msg(pd_port,
+ TCPC_TX_SOP, PD_DATA_REQUEST, 1, &pd_port->last_rdo);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_select_capability_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+
+ if (pd_event_msg_match(pd_event,
+ PD_EVT_CTRL_MSG, PD_CTRL_ACCEPT))
+ pd_port->remote_selected_cap = RDO_POS(pd_port->last_rdo);
+
+ /* Waiting for Hard-Reset Done */
+ if (!pd_event_msg_match(pd_event,
+ PD_EVT_TIMER_MSG, PD_TIMER_SENDER_RESPONSE))
+ pd_unlock_msg_output(pd_port);
+}
+
+void pe_snk_transition_sink_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_timer(pd_port, PD_TIMER_PS_TRANSITION);
+
+ if (pd_event->msg == PD_CTRL_GOTO_MIN) {
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_GIVE_BACK) {
+ pd_port->request_i_new = pd_port->request_i_op;
+ pd_dpm_snk_transition_power(pd_port, pd_event);
+ }
+ }
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_transition_sink_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event_msg_match(pd_event, PD_EVT_CTRL_MSG, PD_CTRL_PS_RDY))
+ pd_dpm_snk_transition_power(pd_port, pd_event);
+
+ pd_disable_timer(pd_port, PD_TIMER_PS_TRANSITION);
+}
+
+void pe_snk_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event_msg_match(pd_event, PD_EVT_CTRL_MSG, PD_CTRL_WAIT))
+ pd_enable_timer(pd_port, PD_TIMER_SINK_REQUEST);
+
+ pd_port->state_machine = PE_STATE_MACHINE_SINK;
+ pe_power_ready_entry(pd_port, pd_event);
+}
+
+void pe_snk_hard_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_hard_reset(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_transition_to_default_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reset_local_hw(pd_port);
+ pd_dpm_snk_hard_reset(pd_port, pd_event);
+}
+
+void pe_snk_transition_to_default_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_timer(pd_port, PD_TIMER_NO_RESPONSE);
+
+#ifdef CONFIG_USB_PD_FAST_RESP_TYPEC_SRC
+ if (!pd_port->pd_prev_connected)
+ pd_enable_timer(pd_port, PD_TIMER_SRC_RECOVER);
+#endif /* CONFIG_USB_PD_FAST_RESP_TYPEC_SRC */
+}
+
+void pe_snk_give_sink_cap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_send_sink_caps(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_get_source_cap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_GET_SOURCE_CAP);
+}
+
+void pe_snk_send_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_soft_reset(pd_port, PE_STATE_MACHINE_SINK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_snk_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_handle_soft_reset(pd_port, PE_STATE_MACHINE_SINK);
+ pd_free_pd_event(pd_port, pd_event);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_src.c b/drivers/usb/pd/richtek/pd_policy_engine_src.c
new file mode 100644
index 000000000000..7b846dca57ef
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_src.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for SRC
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-38 Source Port Policy Engine state diagram
+ */
+
+void pe_src_startup_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->state_machine = PE_STATE_MACHINE_SOURCE;
+
+ pd_port->cap_counter = 0;
+ pd_port->request_i = -1;
+ pd_port->request_v = TCPC_VBUS_SOURCE_5V;
+
+ pd_reset_protocol_layer(pd_port);
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_STARTUP);
+
+ switch (pd_event->event_type) {
+ case PD_EVT_HW_MSG: /* CC attached */
+ pd_enable_vbus_valid_detection(pd_port, true);
+ break;
+
+ case PD_EVT_PE_MSG: /* From Hard-Reset */
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_START);
+ break;
+
+ case PD_EVT_CTRL_MSG: /* From PR-SWAP (Received PS_RDY) */
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_START);
+ break;
+ }
+}
+
+void pe_src_discovery_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+	/* MessageID should be 0 for the first SourceCap (Ellisys)... */
+
+	/*
+	 * The SourceCapabilitiesTimer continues to run during the states
+	 * defined in the Source Startup Structured VDM Discover Identity
+	 * State Diagram.
+	 */
+
+ pd_port->msg_id_tx[TCPC_TX_SOP] = 0;
+ pd_port->pd_connected = false;
+
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_CAPABILITY);
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ if (pd_is_auto_discover_cable_id(pd_port)) {
+ pd_port->msg_id_tx[TCPC_TX_SOP_PRIME] = 0;
+ pd_enable_timer(pd_port, PD_TIMER_DISCOVER_ID);
+ }
+#endif
+}
+
+void pe_src_send_capabilities_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_SEND_WAIT_CAP);
+
+ pd_dpm_send_source_caps(pd_port);
+ pd_port->cap_counter++;
+
+ pd_free_pd_event(pd_port, pd_event); /* soft-reset */
+}
+
+void pe_src_send_capabilities_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+}
+
+void pe_src_negotiate_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->pd_connected = true;
+ pd_port->pd_prev_connected = true;
+
+ pd_dpm_src_evaluate_request(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_transition_supply_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg == PD_DPM_PD_REQUEST) {
+ pd_port->request_i_new = pd_port->request_i_op;
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_GOTO_MIN);
+ } else {
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ }
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_TRANSITION);
+}
+
+void pe_src_transition_supply_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SOURCE_TRANSITION);
+}
+
+void pe_src_transition_supply2_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+}
+
+void pe_src_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->state_machine = PE_STATE_MACHINE_SOURCE;
+ pd_notify_pe_src_explicit_contract(pd_port);
+ pe_power_ready_entry(pd_port, pd_event);
+}
+
+void pe_src_disabled_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_DISABLE);
+ pd_update_connect_state(pd_port, PD_CONNECT_TYPEC_ONLY);
+}
+
+void pe_src_capability_response_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg_sec) {
+ case PD_DPM_NAK_REJECT_INVALID:
+ pd_port->invalid_contract = true;
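+		/* fall through */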
+ case PD_DPM_NAK_REJECT:
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ break;
+
+ case PD_DPM_NAK_WAIT:
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+ break;
+ }
+}
+
+void pe_src_hard_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_hard_reset(pd_port);
+
+ pd_free_pd_event(pd_port, pd_event);
+ pd_enable_timer(pd_port, PD_TIMER_PS_HARD_RESET);
+}
+
+void pe_src_hard_reset_received_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_timer(pd_port, PD_TIMER_PS_HARD_RESET);
+}
+
+void pe_src_transition_to_default_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reset_local_hw(pd_port);
+ pd_dpm_src_hard_reset(pd_port);
+}
+
+void pe_src_transition_to_default_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_enable_vconn(pd_port, true);
+ pd_enable_timer(pd_port, PD_TIMER_NO_RESPONSE);
+}
+
+void pe_src_give_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_send_source_caps(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_get_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_GET_SINK_CAP);
+}
+
+void pe_src_get_sink_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ pd_dpm_dr_inform_sink_cap(pd_port, pd_event);
+}
+
+void pe_src_wait_new_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ /* Wait for new Source Capabilities */
+}
+
+void pe_src_send_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_soft_reset(pd_port, PE_STATE_MACHINE_SOURCE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_handle_soft_reset(pd_port, PE_STATE_MACHINE_SOURCE);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_ping_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ /* TODO: Send Ping Message */
+}
+
+/*
+ * [PD2.0] Figure 8-81
+ * Source Startup Structured VDM Discover Identity State Diagram (TODO)
+ */
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+
+void pe_src_vdm_identity_request_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_set_rx_enable(pd_port, PD_RX_CAP_PE_DISCOVER_CABLE);
+
+ pd_send_vdm_discover_id(pd_port, TCPC_TX_SOP_PRIME);
+
+ pd_port->discover_id_counter++;
+ pd_enable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_vdm_identity_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_CABLE_ID;
+
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_src_inform_cable_vdo(pd_port, pd_event);
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_src_vdm_identity_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VDM_RESPONSE);
+ pd_dpm_src_inform_cable_vdo(pd_port, pd_event);
+
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_ufp.c b/drivers/usb/pd/richtek/pd_policy_engine_ufp.c
new file mode 100644
index 000000000000..3741c381ecd6
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_ufp.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for UFP
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-58 UFP Structured VDM Discover Identity State Diagram
+ */
+
+void pe_ufp_vdm_get_identity_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_request_id_info(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_send_identity_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_response_id(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_get_identity_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_NAK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-59 UFP Structured VDM Discover SVIDs State Diagram
+ */
+
+void pe_ufp_vdm_get_svids_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_request_svid_info(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_send_svids_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_response_svids(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_get_svids_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_NAK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-60 UFP Structured VDM Discover Modes State Diagram
+ */
+
+void pe_ufp_vdm_get_modes_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_request_mode_info(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_send_modes_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_response_modes(pd_port, pd_event);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_get_modes_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_NAK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-61 UFP Structured VDM Enter Mode State Diagram
+ */
+
+void pe_ufp_vdm_evaluate_mode_entry_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_request_enter_mode(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_mode_entry_ack_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_ACK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_mode_entry_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_NAK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-62 UFP Structured VDM Exit Mode State Diagram
+ */
+
+void pe_ufp_vdm_mode_exit_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_ufp_request_exit_mode(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_mode_exit_ack_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_ACK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_ufp_vdm_mode_exit_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_reply_svdm_request_simply(pd_port, pd_event, CMDT_RSP_NAK);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+/*
+ * [PD2.0] Figure 8-63 UFP VDM Attention State Diagram
+ */
+
+void pe_ufp_vdm_attention_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->mode_svid) {
+ default:
+ pd_send_vdm_attention(pd_port,
+ TCPC_TX_SOP, pd_port->mode_svid,
+ pd_port->mode_obj_pos);
+ break;
+ }
+
+ pd_free_pd_event(pd_port, pd_event);
+}
diff --git a/drivers/usb/pd/richtek/pd_policy_engine_vcs.c b/drivers/usb/pd/richtek/pd_policy_engine_vcs.c
new file mode 100644
index 000000000000..d3667ca7a84f
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_policy_engine_vcs.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Policy Engine for VCS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+/*
+ * [PD2.0] Figure 8-57 VCONN Swap State Diagram
+ */
+
+void pe_vcs_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_VCONN_SWAP);
+}
+
+void pe_vcs_evaluate_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_vcs_evaluate_swap(pd_port);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_vcs_accept_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+}
+
+void pe_vcs_reject_vconn_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->msg_sec == PD_DPM_NAK_REJECT)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ else
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_WAIT);
+}
+
+void pe_vcs_wait_for_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_enable_timer(pd_port, PD_TIMER_VCONN_ON);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_vcs_wait_for_vconn_exit(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_disable_timer(pd_port, PD_TIMER_VCONN_ON);
+}
+
+void pe_vcs_turn_off_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_vcs_enable_vconn(pd_port, false);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_vcs_turn_on_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_dpm_vcs_enable_vconn(pd_port, true);
+ pd_free_pd_event(pd_port, pd_event);
+}
+
+void pe_vcs_send_ps_rdy_entry(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt.c b/drivers/usb/pd/richtek/pd_process_evt.c
new file mode 100644
index 000000000000..e5a2fe006534
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt.c
@@ -0,0 +1,883 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+
+/*
+ * [BLOCK] print event
+ */
+
+#if PE_EVENT_DBG_ENABLE
+static const char * const pd_ctrl_msg_name[] = {
+ "ctrl0",
+ "good_crc",
+ "goto_min",
+ "accept",
+ "reject",
+ "ping",
+ "ps_rdy",
+ "get_src_cap",
+ "get_snk_cap",
+ "dr_swap",
+ "pr_swap",
+ "vs_swap",
+ "wait",
+ "soft_reset",
+ "ctrlE",
+ "ctrlF",
+};
+
+static inline void print_ctrl_msg_event(u8 msg)
+{
+ if (msg < PD_CTRL_MSG_NR)
+ PE_EVT_INFO("%s\r\n", pd_ctrl_msg_name[msg]);
+}
+
+static const char * const pd_data_msg_name[] = {
+ "data0",
+ "src_cap",
+ "request",
+ "bist",
+ "sink_cap",
+ "data5",
+ "data6",
+ "data7",
+ "data8",
+ "data9",
+ "dataA",
+ "dataB",
+ "dataC",
+ "dataD",
+ "dataE",
+ "vdm",
+};
+
+static inline void print_data_msg_event(u8 msg)
+{
+ if (msg < PD_DATA_MSG_NR)
+ PE_EVT_INFO("%s\r\n", pd_data_msg_name[msg]);
+}
+
+static const char *const pd_hw_msg_name[] = {
+ "Detached",
+ "Attached",
+ "hard_reset",
+ "vbus_high",
+ "vbus_low",
+ "vbus_0v",
+ "vbus_stable",
+ "tx_err",
+ "retry_vdm",
+};
+
+static inline void print_hw_msg_event(u8 msg)
+{
+ if (msg < PD_HW_MSG_NR)
+ PE_EVT_INFO("%s\r\n", pd_hw_msg_name[msg]);
+}
+
+static const char *const pd_pe_msg_name[] = {
+ "reset_prl_done",
+ "pr_at_dft",
+ "hard_reset_done",
+ "pe_idle",
+};
+
+static inline void print_pe_msg_event(u8 msg)
+{
+ if (msg < PD_PE_MSG_NR)
+ PE_EVT_INFO("%s\r\n", pd_pe_msg_name[msg]);
+}
+
+static const char * const pd_dpm_msg_name[] = {
+ "ack",
+ "nak",
+
+ "pd_req",
+ "vdm_req",
+ "cable_req",
+
+ "cap_change",
+ "recover",
+};
+
+static inline void print_dpm_msg_event(u8 msg)
+{
+ if (msg < PD_DPM_MSG_NR)
+ PE_EVT_INFO("dpm_%s\r\n", pd_dpm_msg_name[msg]);
+}
+
+const char *const pd_dpm_pd_request_name[] = {
+ "pr_swap",
+ "dr_swap",
+ "vs_swap",
+ "gotomin",
+ "softreset",
+ "hardreset",
+ "get_src_cap",
+ "get_snk_cap",
+ "request",
+ "bist_cm2",
+};
+
+static inline void print_dpm_pd_request(u8 msg)
+{
+ if (msg < PD_DPM_PD_REQUEST_NR)
+ PE_EVT_INFO("dpm_pd_req(%s)\r\n", pd_dpm_pd_request_name[msg]);
+}
+#endif
+
+static inline void print_event(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#if PE_EVENT_DBG_ENABLE
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ print_ctrl_msg_event(pd_event->msg);
+ break;
+
+ case PD_EVT_DATA_MSG:
+ print_data_msg_event(pd_event->msg);
+ break;
+
+ case PD_EVT_DPM_MSG:
+ if (pd_event->msg == PD_DPM_PD_REQUEST)
+ print_dpm_pd_request(pd_event->msg_sec);
+ else
+ print_dpm_msg_event(pd_event->msg);
+ break;
+
+ case PD_EVT_HW_MSG:
+ print_hw_msg_event(pd_event->msg);
+ break;
+
+ case PD_EVT_PE_MSG:
+ print_pe_msg_event(pd_event->msg);
+ break;
+
+ case PD_EVT_TIMER_MSG:
+ PE_EVT_INFO("timer\r\n");
+ break;
+ }
+#endif
+}
+
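+/*
+ * Walk the transition table bound to @state_reaction: if an entry matches
+ * the current state, schedule its next_state and return true.
+ */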
+bool pd_make_pe_state_transit(pd_port_t *pd_port,
+ u8 curr_state,
+ const pe_state_reaction_t *state_reaction)
+{
+ int i;
+ const pe_state_transition_t *state_transition =
+ state_reaction->state_transition;
+
+ for (i = 0; i < state_reaction->nr_transition; i++) {
+ if (state_transition[i].curr_state == curr_state) {
+ PE_TRANSIT_STATE(pd_port,
+ state_transition[i].next_state);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool pd_make_pe_state_transit_virt(pd_port_t *pd_port,
+ u8 curr_state,
+ const pe_state_reaction_t *state_reaction)
+{
+ bool ret = pd_make_pe_state_transit(
+ pd_port, curr_state, state_reaction);
+
+ if (ret) {
+ switch (pd_port->pe_state_next) {
+ case PE_VIRT_READY:
+ PE_TRANSIT_READY_STATE(pd_port);
+ break;
+
+ case PE_VIRT_HARD_RESET:
+ PE_TRANSIT_HARD_RESET_STATE(pd_port);
+ break;
+ }
+ }
+ return ret;
+}
+
+bool pd_make_pe_state_transit_force(pd_port_t *pd_port,
+ u8 curr_state, u8 force_state,
+ const pe_state_reaction_t *state_reaction)
+{
+ bool ret = pd_make_pe_state_transit(
+ pd_port, curr_state, state_reaction);
+
+ if (ret)
+ return ret;
+
+ PE_TRANSIT_STATE(pd_port, force_state);
+ return true;
+}
+
+bool pd_process_protocol_error(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool power_change = false;
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ u8 event_type = pd_event->event_type;
+ u8 msg_id = PD_HEADER_ID(pd_msg->msg_hdr);
+ u8 msg_type = PD_HEADER_TYPE(pd_msg->msg_hdr);
+
+ switch (pd_port->pe_state_curr) {
+ case PE_SNK_TRANSITION_SINK:
+ case PE_SRC_TRANSITION_SUPPLY:
+ case PE_SRC_TRANSITION_SUPPLY2:
+ power_change = true;
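+		/* fall through */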
+ case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
+ if (pd_event_msg_match(pd_event,
+ PD_EVT_CTRL_MSG, PD_CTRL_PING)) {
+			PE_DBG("Ignore Ping\r\n");
+ return false;
+ }
+ break;
+
+ case PE_SRC_SOFT_RESET:
+ case PE_SRC_SEND_SOFT_RESET:
+ case PE_SNK_SOFT_RESET:
+ case PE_SNK_SEND_SOFT_RESET:
+ case PE_SNK_READY:
+ case PE_SRC_READY:
+ case PE_BIST_TEST_DATA:
+		PE_DBG("Ignore Unknown Event\r\n");
+ return false;
+	}
+
+ PE_INFO("PRL_ERR: %d-%d-%d\r\n", event_type, msg_type, msg_id);
+
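+	/*
+	 * A protocol error during a swap or a power transition requires a
+	 * Hard Reset; otherwise recover with a Soft Reset.
+	 */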
+ if (pd_port->during_swap)
+ PE_TRANSIT_HARD_RESET_STATE(pd_port);
+ else if (power_change)
+ PE_TRANSIT_HARD_RESET_STATE(pd_port);
+ else
+ PE_TRANSIT_SEND_SOFT_RESET_STATE(pd_port);
+ return true;
+}
+
+bool pd_process_data_msg_bist(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ if (pd_port->request_v > 5000) {
+ PE_INFO("bist_not_vsafe5v\r\n");
+ return false;
+ }
+
+ switch (BDO_MODE(pd_event->pd_msg->payload[0])) {
+ case BDO_MODE_TEST_DATA:
+ PE_DBG("bist_test\r\n");
+ PE_TRANSIT_STATE(pd_port, PE_BIST_TEST_DATA);
+ pd_noitfy_pe_bist_mode(pd_port, PD_BIST_MODE_TEST_DATA);
+ return true;
+
+ case BDO_MODE_CARRIER2:
+ PE_DBG("bist_cm2\r\n");
+ PE_TRANSIT_STATE(pd_port, PE_BIST_CARRIER_MODE_2);
+ pd_noitfy_pe_bist_mode(pd_port, PD_BIST_MODE_DISABLE);
+ return true;
+
+ default:
+ case BDO_MODE_RECV:
+ case BDO_MODE_TRANSMIT:
+ case BDO_MODE_COUNTERS:
+ case BDO_MODE_CARRIER0:
+ case BDO_MODE_CARRIER1:
+ case BDO_MODE_CARRIER3:
+ case BDO_MODE_EYE:
+		PE_DBG("Unsupported BIST\r\n");
+ pd_noitfy_pe_bist_mode(pd_port, PD_BIST_MODE_DISABLE);
+ return false;
+ }
+
+ return false;
+}
+
+/* DRP (Data Role Swap) */
+
+bool pd_process_ctrl_msg_dr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool reject;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ reject = !(pd_port->dpm_caps & DPM_CAP_LOCAL_DR_DATA);
+
+ if (!reject) {
+ if (pd_port->data_role == PD_ROLE_DFP)
+ reject = pd_port->dpm_caps &
+ DPM_CAP_DR_SWAP_REJECT_AS_UFP;
+ else
+ reject = pd_port->dpm_caps &
+ DPM_CAP_DR_SWAP_REJECT_AS_DFP;
+ }
+
+ if (reject) {
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ return false;
+ }
+ if (pd_port->modal_operation) {
+ PE_TRANSIT_HARD_RESET_STATE(pd_port);
+ } else {
+ pd_port->during_swap = false;
+ pd_port->state_machine = PE_STATE_MACHINE_DR_SWAP;
+
+ PE_TRANSIT_DATA_STATE(pd_port,
+ PE_DRS_UFP_DFP_EVALUATE_DR_SWAP,
+ PE_DRS_DFP_UFP_EVALUATE_DR_SWAP);
+ }
+ return true;
+}
+
+bool pd_process_dpm_msg_dr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!(pd_port->dpm_caps & DPM_CAP_LOCAL_DR_DATA))
+ return false;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ pd_port->during_swap = false;
+ pd_port->state_machine = PE_STATE_MACHINE_DR_SWAP;
+
+ PE_TRANSIT_DATA_STATE(pd_port,
+ PE_DRS_UFP_DFP_SEND_DR_SWAP,
+ PE_DRS_DFP_UFP_SEND_DR_SWAP);
+
+ return true;
+}
+
+/* DRP (Power Role Swap) */
+
+bool pd_process_ctrl_msg_pr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool reject;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ reject = !(pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER);
+
+ if (!reject) {
+ if (pd_port->power_role == PD_ROLE_SOURCE)
+ reject = pd_port->dpm_caps &
+ DPM_CAP_PR_SWAP_REJECT_AS_SNK;
+ else
+ reject = pd_port->dpm_caps &
+ DPM_CAP_PR_SWAP_REJECT_AS_SRC;
+ }
+
+ if (reject) {
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ return false;
+ }
+ pd_port->during_swap = false;
+ pd_port->state_machine = PE_STATE_MACHINE_PR_SWAP;
+
+ PE_TRANSIT_POWER_STATE(pd_port,
+ PE_PRS_SNK_SRC_EVALUATE_PR_SWAP,
+ PE_PRS_SRC_SNK_EVALUATE_PR_SWAP);
+
+ return true;
+}
+
+bool pd_process_dpm_msg_pr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!(pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER))
+ return false;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ pd_port->during_swap = false;
+ pd_port->state_machine = PE_STATE_MACHINE_PR_SWAP;
+
+ PE_TRANSIT_POWER_STATE(pd_port,
+ PE_PRS_SNK_SRC_SEND_SWAP,
+ PE_PRS_SRC_SNK_SEND_SWAP);
+ return true;
+}
+
+/* DRP (Vconn Swap) */
+
+bool pd_process_ctrl_msg_vconn_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ if (!(pd_port->dpm_caps & DPM_CAP_LOCAL_VCONN_SUPPLY)) {
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ return false;
+ }
+ pd_port->state_machine = PE_STATE_MACHINE_VCONN_SWAP;
+ PE_TRANSIT_STATE(pd_port, PE_VCS_EVALUATE_SWAP);
+ return true;
+}
+
+bool pd_process_dpm_msg_vconn_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!(pd_port->dpm_caps & DPM_CAP_LOCAL_VCONN_SUPPLY))
+ return false;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ pd_port->state_machine = PE_STATE_MACHINE_VCONN_SWAP;
+ PE_TRANSIT_STATE(pd_port, PE_VCS_SEND_SWAP);
+ return true;
+}
+
+bool pd_process_recv_hard_reset(
+ pd_port_t *pd_port, pd_event_t *pd_event, u8 hreset_state)
+{
+ PE_TRANSIT_STATE(pd_port, hreset_state);
+ return true;
+}
+
+bool pd_process_dpm_msg_pw_request(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr != PE_SNK_READY)
+ return false;
+
+ PE_TRANSIT_STATE(pd_port, PE_SNK_SELECT_CAPABILITY);
+ return true;
+}
+
+bool pd_process_dpm_msg_bist_cm2(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u32 bist = BDO_MODE_CARRIER2;
+
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ pd_send_data_msg(pd_port, TCPC_TX_SOP, PD_DATA_BIST, 1, &bist);
+ return false;
+}
+
+bool pd_process_dpm_msg_gotomin(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr != PE_SRC_READY)
+ return false;
+
+ if (!(pd_port->dpm_flags & DPM_CAP_LOCAL_GIVE_BACK))
+ return false;
+
+ PE_TRANSIT_STATE(pd_port, PE_SRC_TRANSITION_SUPPLY);
+ return true;
+}
+
+bool pd_process_dpm_msg_softreset(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ PE_TRANSIT_SEND_SOFT_RESET_STATE(pd_port);
+ return true;
+}
+
+bool pd_process_dpm_msg_hardreset(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!pd_check_pe_state_ready(pd_port))
+ return false;
+
+ PE_TRANSIT_HARD_RESET_STATE(pd_port);
+ return true;
+}
+
+bool pd_process_dpm_msg_get_source_cap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SNK_READY:
+ PE_TRANSIT_STATE(pd_port, PE_SNK_GET_SOURCE_CAP);
+ return true;
+
+ case PE_SRC_READY:
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER) {
+ PE_TRANSIT_STATE(pd_port, PE_DR_SRC_GET_SOURCE_CAP);
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+bool pd_process_dpm_msg_get_sink_cap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SRC_READY:
+ PE_TRANSIT_STATE(pd_port, PE_SRC_GET_SINK_CAP);
+ return true;
+
+ case PE_SNK_READY:
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER) {
+ PE_TRANSIT_STATE(pd_port, PE_DR_SNK_GET_SINK_CAP);
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+bool pd_process_event_dpm_pd_request(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg_sec) {
+ case PD_DPM_PD_REQUEST_PR_SWAP:
+ ret = pd_process_dpm_msg_pr_swap(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_DR_SWAP:
+ ret = pd_process_dpm_msg_dr_swap(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_VCONN_SWAP:
+ ret = pd_process_dpm_msg_vconn_swap(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_GOTOMIN:
+ ret = pd_process_dpm_msg_gotomin(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_SOFTRESET:
+ ret = pd_process_dpm_msg_softreset(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_HARDRESET:
+ ret = pd_process_dpm_msg_hardreset(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_GET_SOURCE_CAP:
+ ret = pd_process_dpm_msg_get_source_cap(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_GET_SINK_CAP:
+ ret = pd_process_dpm_msg_get_sink_cap(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_PW_REQUEST:
+ ret = pd_process_dpm_msg_pw_request(pd_port, pd_event);
+ break;
+
+ case PD_DPM_PD_REQUEST_BIST_CM2:
+ ret = pd_process_dpm_msg_bist_cm2(pd_port, pd_event);
+ break;
+
+ default:
+ PE_DBG("Unknown PD_Request\r\n");
+ return false;
+ }
+
+	if (!ret) {
+		/* TODO: notify DPM that the Policy Engine rejected this request ... */
+		PE_DBG("Reject DPM PD Request\r\n");
+	}
+ return ret;
+}
+
+/*
+ * Return true for a valid message;
+ * return false for an invalid message, which the PE should drop.
+ */
+
+static inline bool pe_is_valid_pd_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ u8 event_type = pd_event->event_type;
+ u8 sop_type = pd_msg->frame_type;
+ u8 msg_id = PD_HEADER_ID(pd_msg->msg_hdr);
+ u8 msg_type = PD_HEADER_TYPE(pd_msg->msg_hdr);
+
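+ /* while in BIST test-data mode, drop every incoming message */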
+ if (pd_port->pe_state_curr == PE_BIST_TEST_DATA)
+ return false;
+
+ if (event_type == PD_EVT_CTRL_MSG) {
+ switch (msg_type) {
+ /* SoftReset always has a MessageID value of zero */
+ case PD_CTRL_SOFT_RESET:
+ if (msg_id != 0) {
+ PE_INFO("Repeat soft_reset\r\n");
+ return false;
+ }
+
+ return true;
+ case PD_CTRL_GOOD_CRC:
+ PE_DBG("Discard_CRC\r\n");
+ return true;
+ }
+ }
+
+ if ((pd_port->msg_id_rx_init[sop_type]) &&
+ (pd_port->msg_id_rx[sop_type] == msg_id)) {
+ PE_INFO("Repeat msg: %c:%d:%d\r\n",
+ (pd_event->event_type == PD_EVT_CTRL_MSG) ? 'C' : 'D',
+ pd_event->msg, msg_id);
+ return false;
+ }
+
+ pd_port->msg_id_rx[sop_type] = msg_id;
+ pd_port->msg_id_rx_init[sop_type] = true;
+
+ return true;
+}
+
+static inline bool pe_is_valid_pd_msg_role(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = true;
+ u8 msg_pr, msg_dr;
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ if (!pd_msg) /* Good-CRC */
+ return true;
+
+ if (pd_msg->frame_type != TCPC_TX_SOP)
+ return true;
+
+ msg_pr = PD_HEADER_PR(pd_msg->msg_hdr);
+ msg_dr = PD_HEADER_DR(pd_msg->msg_hdr);
+
+ /*
+ * The Port Power Role field of a received Message shall not be verified
+ * by the receiver and no error recovery action shall be
+ * taken if it is incorrect.
+ */
+
+ if (msg_pr == pd_port->power_role)
+ PE_DBG("Wrong PR:%d\r\n", msg_pr);
+
+ /*
+ * Should a Type-C Port receive a Message with the Port Data Role field
+ * set to the same Data Role as its current Data Role, except for the
+ * GoodCRC Message, Type-C error recovery actions as defined in
+ * [USB Type-C 1.0] shall be performed.
+ */
+
+ if (msg_dr == pd_port->data_role)
+ PE_INFO("Wrong DR:%d\r\n", msg_dr);
+
+ return ret;
+}
+
+static inline bool pe_translate_pd_msg_event(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_msg_t *pd_msg;
+
+ if (pd_event->event_type != PD_EVT_PD_MSG)
+ return true;
+
+ pd_msg = pd_event->pd_msg;
+
+ if (PD_HEADER_CNT(pd_msg->msg_hdr))
+ pd_event->event_type = PD_EVT_DATA_MSG;
+ else
+ pd_event->event_type = PD_EVT_CTRL_MSG;
+
+ pd_event->msg = PD_HEADER_TYPE(pd_msg->msg_hdr);
+
+ return pe_is_valid_pd_msg(pd_port, pd_event);
+}
+
+static inline bool pe_exit_idle_state(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ pd_port->custom_dbgacc = false;
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ switch (pd_event->msg_sec) {
+ case TYPEC_ATTACHED_SNK:
+ pd_init_role(pd_port,
+ PD_ROLE_SINK, PD_ROLE_UFP, PD_ROLE_VCONN_OFF);
+ break;
+
+ case TYPEC_ATTACHED_SRC:
+ pd_init_role(pd_port,
+ PD_ROLE_SOURCE, PD_ROLE_DFP, PD_ROLE_VCONN_ON);
+ break;
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ case TYPEC_ATTACHED_DBGACC_SNK:
+ pd_port->custom_dbgacc = true;
+ pd_init_role(pd_port,
+ PD_ROLE_SINK, PD_ROLE_UFP, PD_ROLE_VCONN_OFF);
+ break;
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ default:
+ return false;
+ }
+
+ pd_port->cap_counter = 0;
+ pd_port->discover_id_counter = 0;
+ pd_port->hard_reset_counter = 0;
+ pd_port->get_snk_cap_count = 0;
+ pd_port->get_src_cap_count = 0;
+
+ pd_port->pe_ready = 0;
+ pd_port->pd_connected = 0;
+ pd_port->pd_prev_connected = 0;
+ pd_port->reset_vdm_state = 0;
+ pd_port->power_cable_present = 0;
+
+ pd_port->explicit_contract = false;
+ pd_port->invalid_contract = false;
+
+ pd_port->modal_operation = false;
+ pd_port->during_swap = false;
+ pd_port->dpm_ack_immediately = false;
+
+ pd_port->remote_src_cap.nr = 0;
+ pd_port->remote_snk_cap.nr = 0;
+
+ memset(pd_port->cable_vdos, 0, sizeof(uint32_t) * VDO_MAX_SIZE);
+
+ pd_dpm_notify_pe_startup(pd_port);
+ return true;
+}
+
+static inline bool pe_is_trap_in_idle_state(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool trap = true;
+
+ switch (pd_port->pe_state_curr) {
+ case PE_IDLE1:
+ if (pd_event_msg_match(pd_event, PD_EVT_PE_MSG, PD_PE_IDLE))
+ return false;
+ pd_try_put_pe_idle_event(pd_port);
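+ /* fall through */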
+ case PE_IDLE2:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (pd_event->event_type == PD_EVT_HW_MSG) {
+ switch (pd_event->msg) {
+ case PD_HW_CC_ATTACHED:
+ trap = false;
+ break;
+
+ case PD_HW_CC_DETACHED:
+ pd_notify_pe_idle(pd_port);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!trap)
+ trap = !pe_exit_idle_state(pd_port, pd_event);
+ return trap;
+}
+
+bool pd_process_event(pd_port_t *pd_port, pd_event_t *pd_event, bool vdm_evt)
+{
+ bool ret = false;
+
+ if (pe_is_trap_in_idle_state(pd_port, pd_event)) {
+ PE_DBG("Trap in idle state, Igrone All MSG\r\n");
+ return false;
+ }
+
+ if (!pe_translate_pd_msg_event(pd_port, pd_event))
+ return false;
+
+#if PE_EVT_INFO_VDM_DIS
+ if (!vdm_evt)
+#endif
+ print_event(pd_port, pd_event);
+
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ case PD_EVT_DATA_MSG:
+ if (!pe_is_valid_pd_msg_role(pd_port, pd_event)) {
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ return true;
+ }
+ break;
+ }
+
+ if (vdm_evt)
+ return pd_process_event_vdm(pd_port, pd_event);
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ if (pd_port->custom_dbgacc)
+ return pd_process_event_dbg(pd_port, pd_event);
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ if ((pd_event->event_type == PD_EVT_CTRL_MSG) &&
+ (pd_event->msg != PD_CTRL_GOOD_CRC) &&
+ (pd_event->pd_msg->frame_type != TCPC_TX_SOP)) {
+ PE_DBG("Igrone not SOP Ctrl Msg\r\n");
+ return false;
+ }
+
+ if (pd_event_msg_match(pd_event, PD_EVT_DPM_MSG, PD_DPM_PD_REQUEST))
+ return pd_process_event_dpm_pd_request(pd_port, pd_event);
+
+ switch (pd_port->state_machine) {
+ case PE_STATE_MACHINE_DR_SWAP:
+ ret = pd_process_event_drs(pd_port, pd_event);
+ break;
+ case PE_STATE_MACHINE_PR_SWAP:
+ ret = pd_process_event_prs(pd_port, pd_event);
+ break;
+ case PE_STATE_MACHINE_VCONN_SWAP:
+ ret = pd_process_event_vcs(pd_port, pd_event);
+ break;
+ }
+
+ if (ret)
+ return true;
+
+ if (pd_port->power_role == PD_ROLE_SINK)
+ ret = pd_process_event_snk(pd_port, pd_event);
+ else
+ ret = pd_process_event_src(pd_port, pd_event);
+
+ return ret;
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_dbg.c b/drivers/usb/pd/richtek/pd_process_evt_dbg.c
new file mode 100644
index 000000000000..162a597dfca1
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_dbg.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For DBGACC
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_IDLE) = {
+ { PE_IDLE1, PE_IDLE2 },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_IDLE);
+
+bool pd_process_event_dbg(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_event->event_type == PD_EVT_HW_MSG) {
+ switch (pd_event->msg) {
+ case PD_HW_CC_DETACHED:
+ PE_TRANSIT_STATE(pd_port, PE_IDLE1);
+ return true;
+
+ case PD_HW_CC_ATTACHED:
+ PE_TRANSIT_STATE(pd_port, PE_DBG_READY);
+ return true;
+ }
+ }
+
+ if (pd_event_msg_match(pd_event, PD_EVT_PE_MSG, PD_PE_IDLE))
+ return PE_MAKE_STATE_TRANSIT(PD_PE_MSG_IDLE);
+
+ return false;
+}
+
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
diff --git a/drivers/usb/pd/richtek/pd_process_evt_drs.c b/drivers/usb/pd/richtek/pd_process_evt_drs.c
new file mode 100644
index 000000000000..c5e68336fdc7
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_drs.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For DRS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+
+/* PD Control MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_ACCEPT) = {
+ { PE_DRS_DFP_UFP_SEND_DR_SWAP, PE_DRS_DFP_UFP_CHANGE_TO_UFP },
+ { PE_DRS_UFP_DFP_SEND_DR_SWAP, PE_DRS_UFP_DFP_CHANGE_TO_DFP },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_ACCEPT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_REJECT_WAIT) = {
+ { PE_DRS_DFP_UFP_SEND_DR_SWAP, PE_VIRT_READY },
+ { PE_DRS_UFP_DFP_SEND_DR_SWAP, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_REJECT_WAIT);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_DRS_DFP_UFP_EVALUATE_DR_SWAP, PE_DRS_DFP_UFP_ACCEPT_DR_SWAP },
+ { PE_DRS_UFP_DFP_EVALUATE_DR_SWAP, PE_DRS_UFP_DFP_ACCEPT_DR_SWAP },
+ { PE_DRS_DFP_UFP_CHANGE_TO_UFP, PE_VIRT_READY },
+ { PE_DRS_UFP_DFP_CHANGE_TO_DFP, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+ { PE_DRS_DFP_UFP_EVALUATE_DR_SWAP, PE_DRS_DFP_UFP_REJECT_DR_SWAP },
+ { PE_DRS_UFP_DFP_EVALUATE_DR_SWAP, PE_DRS_UFP_DFP_REJECT_DR_SWAP },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SENDER_RESPONSE) = {
+ { PE_DRS_DFP_UFP_SEND_DR_SWAP, PE_VIRT_READY },
+ { PE_DRS_UFP_DFP_SEND_DR_SWAP, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SENDER_RESPONSE);
+
+/*
+ * [BLOCK] Process PD Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_DRS_DFP_UFP_REJECT_DR_SWAP:
+ case PE_DRS_UFP_DFP_REJECT_DR_SWAP:
+ PE_TRANSIT_READY_STATE(pd_port);
+ return true;
+
+ case PE_DRS_DFP_UFP_ACCEPT_DR_SWAP:
+ PE_TRANSIT_STATE(pd_port, PE_DRS_DFP_UFP_CHANGE_TO_UFP);
+ return true;
+
+ case PE_DRS_UFP_DFP_ACCEPT_DR_SWAP:
+ PE_TRANSIT_STATE(pd_port, PE_DRS_UFP_DFP_CHANGE_TO_DFP);
+ return true;
+
+ case PE_DRS_DFP_UFP_SEND_DR_SWAP:
+ case PE_DRS_UFP_DFP_SEND_DR_SWAP:
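+ /* DR_Swap delivered: start the SenderResponse timer and wait for the reply */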
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ case PD_CTRL_ACCEPT:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_ACCEPT);
+
+ case PD_CTRL_WAIT:
+ case PD_CTRL_REJECT:
+ return PE_MAKE_STATE_TRANSIT_VIRT(PD_CTRL_MSG_REJECT_WAIT);
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_DPM_MSG_ACK);
+ break;
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_TIMER_SENDER_RESPONSE:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_TIMER_SENDER_RESPONSE);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Policy Engine's DRS Message
+ */
+
+bool pd_process_event_drs(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+
+ default:
+ return false;
+ }
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_prs.c b/drivers/usb/pd/richtek/pd_process_evt_prs.c
new file mode 100644
index 000000000000..ed05f66e5b34
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_prs.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For PRS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+
+/* PD Control MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GOOD_CRC) = {
+ { PE_PRS_SRC_SNK_ACCEPT_PR_SWAP, PE_PRS_SRC_SNK_TRANSITION_TO_OFF },
+ { PE_PRS_SRC_SNK_REJECT_PR_SWAP, PE_SRC_READY },
+
+ { PE_PRS_SNK_SRC_ACCEPT_PR_SWAP, PE_PRS_SNK_SRC_TRANSITION_TO_OFF },
+ { PE_PRS_SNK_SRC_REJECT_SWAP, PE_SNK_READY },
+
+ /* VBUS-ON & PS_RDY SENT */
+ { PE_PRS_SNK_SRC_SOURCE_ON, PE_SRC_STARTUP },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GOOD_CRC);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_ACCEPT) = {
+ { PE_PRS_SRC_SNK_SEND_SWAP, PE_PRS_SRC_SNK_TRANSITION_TO_OFF },
+ { PE_PRS_SNK_SRC_SEND_SWAP, PE_PRS_SNK_SRC_TRANSITION_TO_OFF },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_ACCEPT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_REJECT_WAIT) = {
+ { PE_PRS_SRC_SNK_SEND_SWAP, PE_SRC_READY },
+ { PE_PRS_SNK_SRC_SEND_SWAP, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_REJECT_WAIT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_PS_RDY) = {
+ { PE_PRS_SRC_SNK_WAIT_SOURCE_ON, PE_SNK_STARTUP },
+ { PE_PRS_SNK_SRC_TRANSITION_TO_OFF, PE_PRS_SNK_SRC_ASSERT_RP },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_PS_RDY);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_PRS_SRC_SNK_EVALUATE_PR_SWAP, PE_PRS_SRC_SNK_ACCEPT_PR_SWAP },
+ { PE_PRS_SNK_SRC_EVALUATE_PR_SWAP, PE_PRS_SNK_SRC_ACCEPT_PR_SWAP },
+
+ { PE_PRS_SRC_SNK_ASSERT_RD, PE_PRS_SRC_SNK_WAIT_SOURCE_ON },
+ { PE_PRS_SNK_SRC_ASSERT_RP, PE_PRS_SNK_SRC_SOURCE_ON },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+ { PE_PRS_SRC_SNK_EVALUATE_PR_SWAP, PE_PRS_SRC_SNK_REJECT_PR_SWAP },
+ { PE_PRS_SNK_SRC_EVALUATE_PR_SWAP, PE_PRS_SNK_SRC_REJECT_SWAP },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+/* HW Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_HW_TX_FAILED) = {
+ { PE_PRS_SRC_SNK_WAIT_SOURCE_ON, PE_SNK_HARD_RESET },
+ { PE_PRS_SNK_SRC_SOURCE_ON, PE_SRC_HARD_RESET },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_TX_FAILED);
+
+DECL_PE_STATE_TRANSITION(PD_HW_VBUS_SAFE0V) = {
+ { PE_PRS_SRC_SNK_TRANSITION_TO_OFF, PE_PRS_SRC_SNK_ASSERT_RD },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_VBUS_SAFE0V);
+
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SENDER_RESPONSE) = {
+ { PE_PRS_SRC_SNK_SEND_SWAP, PE_SRC_READY },
+ { PE_PRS_SNK_SRC_SEND_SWAP, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SENDER_RESPONSE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_PS_SOURCE_ON) = {
+ { PE_PRS_SRC_SNK_WAIT_SOURCE_ON, PE_SNK_HARD_RESET },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_PS_SOURCE_ON);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_PS_SOURCE_OFF) = {
+ { PE_PRS_SNK_SRC_TRANSITION_TO_OFF, PE_SNK_HARD_RESET },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_PS_SOURCE_OFF);
+
+/*
+ * [BLOCK] Process PD Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
+ pd_enable_timer(pd_port, PD_TIMER_PS_SOURCE_ON);
+ pd_unlock_msg_output(pd_port); /* for tSRCTransition */
+ return false;
+
+ case PE_PRS_SRC_SNK_SEND_SWAP:
+ case PE_PRS_SNK_SRC_SEND_SWAP:
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ return false;
+
+ default:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GOOD_CRC);
+ }
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ case PD_CTRL_ACCEPT:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_ACCEPT);
+
+ case PD_CTRL_WAIT:
+ case PD_CTRL_REJECT:
+ pd_notify_pe_cancel_pr_swap(pd_port);
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_REJECT_WAIT);
+
+ case PD_CTRL_PS_RDY:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_PS_RDY);
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_DPM_MSG_ACK);
+ break;
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process HW MSG
+ */
+
+static inline bool pd_process_hw_msg_vbus_present(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr == PE_PRS_SNK_SRC_SOURCE_ON)
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+
+ return false;
+}
+
+static inline bool pd_process_hw_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_HW_VBUS_PRESENT:
+ return pd_process_hw_msg_vbus_present(pd_port, pd_event);
+
+ case PD_HW_TX_FAILED:
+ return PE_MAKE_STATE_TRANSIT(PD_HW_TX_FAILED);
+
+ case PD_HW_VBUS_SAFE0V:
+ return PE_MAKE_STATE_TRANSIT(PD_HW_VBUS_SAFE0V);
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_TIMER_SENDER_RESPONSE:
+ pd_notify_pe_cancel_pr_swap(pd_port);
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_SENDER_RESPONSE);
+
+ case PD_TIMER_PS_SOURCE_ON:
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_PS_SOURCE_ON);
+
+ case PD_TIMER_PS_SOURCE_OFF:
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_PS_SOURCE_OFF);
+
+ case PD_TIMER_SOURCE_TRANSITION:
+ pd_dpm_prs_enable_power_source(pd_port, false);
+ return false;
+ default:
+ return false;
+ }
+}
+
+/*
+ * [BLOCK] Process Policy Engine's PRS Message
+ */
+
+bool pd_process_event_prs(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_HW_MSG:
+ return pd_process_hw_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+
+ default:
+ return false;
+ }
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_snk.c b/drivers/usb/pd/richtek/pd_process_evt_snk.c
new file mode 100644
index 000000000000..a9102f3d3648
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_snk.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For SNK
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+
+/* PD Control MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GOOD_CRC) = {
+ /* sink */
+ { PE_SNK_GIVE_SINK_CAP, PE_SNK_READY },
+ { PE_SNK_GET_SOURCE_CAP, PE_SNK_READY },
+
+ { PE_SNK_SOFT_RESET, PE_SNK_WAIT_FOR_CAPABILITIES },
+
+ /* dual */
+ { PE_DR_SNK_GIVE_SOURCE_CAP, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GOOD_CRC);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GOTO_MIN) = {
+ { PE_SNK_READY, PE_SNK_TRANSITION_SINK },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GOTO_MIN);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_ACCEPT) = {
+ { PE_SNK_SELECT_CAPABILITY, PE_SNK_TRANSITION_SINK },
+ { PE_SNK_SEND_SOFT_RESET, PE_SNK_WAIT_FOR_CAPABILITIES },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_ACCEPT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_PS_RDY) = {
+ { PE_SNK_TRANSITION_SINK, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_PS_RDY);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GET_SINK_CAP) = {
+ { PE_SNK_READY, PE_SNK_GIVE_SINK_CAP },
+
+ { PE_SNK_GET_SOURCE_CAP, PE_SNK_GIVE_SINK_CAP },
+ { PE_DR_SNK_GET_SINK_CAP, PE_SNK_GIVE_SINK_CAP },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GET_SINK_CAP);
+
+/* PD Data MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DATA_MSG_SOURCE_CAP) = {
+ { PE_SNK_WAIT_FOR_CAPABILITIES, PE_SNK_EVALUATE_CAPABILITY },
+ { PE_SNK_READY, PE_SNK_EVALUATE_CAPABILITY },
+
+ /* PR-Swap issue (Check it later) */
+ { PE_SNK_STARTUP, PE_SNK_EVALUATE_CAPABILITY },
+ { PE_SNK_DISCOVERY, PE_SNK_EVALUATE_CAPABILITY },
+};
+
+DECL_PE_STATE_REACTION(PD_DATA_MSG_SOURCE_CAP);
+
+DECL_PE_STATE_TRANSITION(PD_DATA_MSG_SINK_CAP) = {
+ { PE_DR_SNK_GET_SINK_CAP, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_DATA_MSG_SINK_CAP);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_SNK_EVALUATE_CAPABILITY, PE_SNK_SELECT_CAPABILITY },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+/* HW Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_HW_MSG_VBUS_PRESENT) = {
+ { PE_SNK_DISCOVERY, PE_SNK_WAIT_FOR_CAPABILITIES},
+};
+
+DECL_PE_STATE_REACTION(PD_HW_MSG_VBUS_PRESENT);
+
+DECL_PE_STATE_TRANSITION(PD_HW_MSG_VBUS_ABSENT) = {
+ { PE_SNK_STARTUP, PE_SNK_DISCOVERY },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_MSG_VBUS_ABSENT);
+
+DECL_PE_STATE_TRANSITION(PD_HW_MSG_TX_FAILED) = {
+ { PE_SNK_SOFT_RESET, PE_SNK_HARD_RESET },
+ { PE_SNK_SEND_SOFT_RESET, PE_SNK_HARD_RESET },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_MSG_TX_FAILED);
+
+/* PE Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_HARD_RESET_COMPLETED) = {
+ { PE_SNK_HARD_RESET, PE_SNK_TRANSITION_TO_DEFAULT },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_HARD_RESET_COMPLETED);
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_RESET_PRL_COMPLETED) = {
+ { PE_SNK_STARTUP, PE_SNK_DISCOVERY },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_RESET_PRL_COMPLETED);
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_POWER_ROLE_AT_DEFAULT) = {
+ { PE_SNK_TRANSITION_TO_DEFAULT, PE_SNK_STARTUP },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_POWER_ROLE_AT_DEFAULT);
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_IDLE) = {
+ { PE_IDLE1, PE_IDLE2 },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_IDLE);
+
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_BIST_CONT_MODE) = {
+ { PE_BIST_CARRIER_MODE_2, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_BIST_CONT_MODE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SENDER_RESPONSE) = {
+ { PE_SNK_SELECT_CAPABILITY, PE_SNK_HARD_RESET },
+ { PE_SNK_SEND_SOFT_RESET, PE_SNK_HARD_RESET },
+
+ { PE_DR_SNK_GET_SINK_CAP, PE_SNK_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SENDER_RESPONSE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SINK_REQUEST) = {
+ { PE_SNK_READY, PE_SNK_SELECT_CAPABILITY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SINK_REQUEST);
+
+/*
+ * [BLOCK] Process Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SNK_SELECT_CAPABILITY:
+ case PE_SNK_SEND_SOFT_RESET:
+ case PE_DR_SNK_GET_SINK_CAP:
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ return false;
+
+ default:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GOOD_CRC);
+ }
+}
+
+static inline bool pd_process_ctrl_msg_get_source_cap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SNK_READY:
+ case PE_DR_SNK_GET_SINK_CAP:
+ case PE_SNK_GET_SOURCE_CAP:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER) {
+ PE_TRANSIT_STATE(pd_port, PE_DR_SNK_GIVE_SOURCE_CAP);
+ return true;
+ }
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ return false;
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ case PD_CTRL_GOTO_MIN:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GOTO_MIN);
+ break;
+
+ case PD_CTRL_ACCEPT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_ACCEPT);
+ break;
+
+ case PD_CTRL_PING:
+ pd_notify_pe_recv_ping_event(pd_port);
+ break;
+
+ case PD_CTRL_PS_RDY:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_PS_RDY);
+ break;
+
+ case PD_CTRL_GET_SOURCE_CAP:
+ ret = pd_process_ctrl_msg_get_source_cap(pd_port, pd_event);
+ break;
+
+ case PD_CTRL_GET_SINK_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GET_SINK_CAP);
+ break;
+
+ case PD_CTRL_REJECT:
+ if (pd_port->pe_state_curr == PE_DR_SNK_GET_SINK_CAP) {
+ PE_TRANSIT_STATE(pd_port, PE_SNK_READY);
+ return true;
+ }
+ /* fall through */
+
+ case PD_CTRL_WAIT:
+ if (pd_port->pe_state_curr == PE_SNK_SELECT_CAPABILITY) {
+ if (pd_port->explicit_contract)
+ PE_TRANSIT_STATE(pd_port, PE_SNK_READY);
+ else
+ PE_TRANSIT_STATE(pd_port,
+ PE_SNK_WAIT_FOR_CAPABILITIES);
+
+ return true;
+ }
+ break;
+
+ /* Swap */
+ case PD_CTRL_DR_SWAP:
+ ret = pd_process_ctrl_msg_dr_swap(pd_port, pd_event);
+ break;
+
+ case PD_CTRL_PR_SWAP:
+ ret = pd_process_ctrl_msg_pr_swap(pd_port, pd_event);
+ break;
+
+ case PD_CTRL_VCONN_SWAP:
+ ret = pd_process_ctrl_msg_vconn_swap(pd_port, pd_event);
+ break;
+
+ /* SoftReset */
+ case PD_CTRL_SOFT_RESET:
+ if (!pd_port->during_swap) {
+ PE_TRANSIT_STATE(pd_port, PE_SNK_SOFT_RESET);
+ return true;
+ }
+ break;
+ }
+
+ if (!ret)
+ ret = pd_process_protocol_error(pd_port, pd_event);
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Data MSG
+ */
+
+static inline bool pd_process_data_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DATA_SOURCE_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DATA_MSG_SOURCE_CAP);
+ break;
+
+ case PD_DATA_SINK_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DATA_MSG_SINK_CAP);
+ break;
+
+ case PD_DATA_BIST:
+ ret = pd_process_data_msg_bist(pd_port, pd_event);
+ break;
+
+ case PD_DATA_REQUEST:
+ case PD_DATA_VENDOR_DEF:
+ break;
+ }
+
+ if (!ret)
+ ret = pd_process_protocol_error(pd_port, pd_event);
+ return ret;
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_ACK);
+ break;
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+ case PD_DPM_ERROR_RECOVERY:
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ return true;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process HW MSG
+ */
+
+static inline bool pd_process_hw_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_HW_CC_DETACHED:
+ PE_TRANSIT_STATE(pd_port, PE_IDLE1);
+ return true;
+
+ case PD_HW_CC_ATTACHED:
+ PE_TRANSIT_STATE(pd_port, PE_SNK_STARTUP);
+ return true;
+
+ case PD_HW_RECV_HARD_RESET:
+ ret = pd_process_recv_hard_reset(
+ pd_port, pd_event, PE_SNK_TRANSITION_TO_DEFAULT);
+ break;
+
+ case PD_HW_VBUS_PRESENT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_HW_MSG_VBUS_PRESENT);
+ break;
+
+ case PD_HW_VBUS_ABSENT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_HW_MSG_VBUS_ABSENT);
+ break;
+
+ case PD_HW_TX_FAILED:
+ ret = PE_MAKE_STATE_TRANSIT_FORCE(
+ PD_HW_MSG_TX_FAILED, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process PE MSG
+ */
+
+static inline bool pd_process_pe_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_PE_RESET_PRL_COMPLETED:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_RESET_PRL_COMPLETED);
+ break;
+
+ case PD_PE_HARD_RESET_COMPLETED:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_HARD_RESET_COMPLETED);
+ break;
+
+ case PD_PE_POWER_ROLE_AT_DEFAULT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_POWER_ROLE_AT_DEFAULT);
+ break;
+
+ case PD_PE_IDLE:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_IDLE);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+
+static inline void pd_report_typec_only_charger(pd_port_t *pd_port)
+{
+ /* TODO: pd_set_rx_enable(pd_port, PD_RX_CAP_PE_DISABLE);*/
+ PE_INFO("TYPE-C Only Charger!\r\n");
+ pd_dpm_sink_vbus(pd_port, true);
+ pd_update_connect_state(pd_port, PD_CONNECT_TYPEC_ONLY);
+}
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_TIMER_BIST_CONT_MODE:
+ ret = PE_MAKE_STATE_TRANSIT(PD_TIMER_BIST_CONT_MODE);
+ break;
+
+ case PD_TIMER_SINK_REQUEST:
+ ret = PE_MAKE_STATE_TRANSIT(PD_TIMER_SINK_REQUEST);
+ break;
+
+#ifndef CONFIG_USB_PD_DBG_IGRONE_TIMEOUT
+ case PD_TIMER_SENDER_RESPONSE:
+ ret = PE_MAKE_STATE_TRANSIT(PD_TIMER_SENDER_RESPONSE);
+ break;
+
+ case PD_TIMER_SINK_WAIT_CAP:
+ case PD_TIMER_PS_TRANSITION:
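+ /* retry with Hard Reset until nHardResetCount is exceeded */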
+ if (pd_port->hard_reset_counter <= PD_HARD_RESET_COUNT) {
+ PE_TRANSIT_STATE(pd_port, PE_SNK_HARD_RESET);
+ return true;
+ }
+ break;
+
+#ifdef CONFIG_USB_PD_FAST_RESP_TYPEC_SRC
+ case PD_TIMER_SRC_RECOVER:
+ if (pd_port->pe_state_curr == PE_SNK_STARTUP) {
+ pd_disable_timer(pd_port, PD_TIMER_NO_RESPONSE);
+ pd_report_typec_only_charger(pd_port);
+ }
+ break;
+#endif /* CONFIG_USB_PD_FAST_RESP_TYPEC_SRC */
+
+ case PD_TIMER_NO_RESPONSE:
+ if (!pd_dpm_check_vbus_valid(pd_port)) {
+ PE_DBG("NoResp&VBUS=0\r\n");
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ ret = true;
+ } else if (pd_port->hard_reset_counter <= PD_HARD_RESET_COUNT) {
+ PE_TRANSIT_STATE(pd_port, PE_SNK_HARD_RESET);
+ ret = true;
+ } else if (pd_port->pd_prev_connected) {
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ ret = true;
+ } else {
+ pd_report_typec_only_charger(pd_port);
+ }
+ break;
+#endif
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ case PD_TIMER_DISCOVER_ID:
+ vdm_put_dpm_discover_cable_event(pd_port);
+ break;
+#endif
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Policy Engine's SNK Message
+ */
+
+bool pd_process_event_snk(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DATA_MSG:
+ return pd_process_data_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_HW_MSG:
+ return pd_process_hw_msg(pd_port, pd_event);
+
+ case PD_EVT_PE_MSG:
+ return pd_process_pe_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+
+ default:
+ return false;
+ }
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_src.c b/drivers/usb/pd/richtek/pd_process_evt_src.c
new file mode 100644
index 000000000000..8c307ee1427e
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_src.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For SRC
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+
+/* PD Control MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GOOD_CRC) = {
+ { PE_SRC_TRANSITION_SUPPLY2, PE_SRC_READY },
+ { PE_SRC_GIVE_SOURCE_CAP, PE_SRC_READY },
+ { PE_SRC_SOFT_RESET, PE_SRC_SEND_CAPABILITIES },
+
+ { PE_DR_SRC_GIVE_SINK_CAP, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GOOD_CRC);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_GET_SOURCE_CAP) = {
+ { PE_SRC_READY, PE_SRC_GIVE_SOURCE_CAP },
+
+/* Handle Port Partner Request first */
+ { PE_DR_SRC_GET_SOURCE_CAP, PE_SRC_GIVE_SOURCE_CAP},
+ { PE_SRC_GET_SINK_CAP, PE_SRC_GIVE_SOURCE_CAP},
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_GET_SOURCE_CAP);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_ACCEPT) = {
+ {PE_SRC_SEND_SOFT_RESET, PE_SRC_SEND_CAPABILITIES },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_ACCEPT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_REJECT) = {
+ { PE_DR_SRC_GET_SOURCE_CAP, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_REJECT);
+
+/* PD Data MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DATA_MSG_REQUEST) = {
+ { PE_SRC_SEND_CAPABILITIES, PE_SRC_NEGOTIATE_CAPABILITIES },
+ { PE_SRC_READY, PE_SRC_NEGOTIATE_CAPABILITIES },
+
+/* Handle Port Partner Request first */
+ { PE_DR_SRC_GET_SOURCE_CAP, PE_SRC_GIVE_SOURCE_CAP},
+ { PE_SRC_GET_SINK_CAP, PE_SRC_GIVE_SOURCE_CAP},
+};
+
+DECL_PE_STATE_REACTION(PD_DATA_MSG_REQUEST);
+
+DECL_PE_STATE_TRANSITION(PD_DATA_MSG_SOURCE_CAP) = {
+ { PE_DR_SRC_GET_SOURCE_CAP, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_DATA_MSG_SOURCE_CAP);
+
+DECL_PE_STATE_TRANSITION(PD_DATA_MSG_SINK_CAP) = {
+ { PE_SRC_GET_SINK_CAP, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_DATA_MSG_SINK_CAP);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_SRC_NEGOTIATE_CAPABILITIES, PE_SRC_TRANSITION_SUPPLY },
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ { PE_SRC_STARTUP, PE_SRC_SEND_CAPABILITIES },
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+ { PE_SRC_NEGOTIATE_CAPABILITIES, PE_SRC_CAPABILITY_RESPONSE },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_CAP_CHANGED) = {
+ { PE_SRC_READY, PE_SRC_SEND_CAPABILITIES },
+ { PE_SRC_WAIT_NEW_CAPABILITIES, PE_SRC_SEND_CAPABILITIES },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_CAP_CHANGED);
+
+/* HW Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_HW_MSG_TX_FAILED) = {
+ { PE_SRC_SOFT_RESET, PE_SRC_HARD_RESET },
+ { PE_SRC_SEND_SOFT_RESET, PE_SRC_HARD_RESET },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_MSG_TX_FAILED);
+
+DECL_PE_STATE_TRANSITION(PD_HW_VBUS_STABLE) = {
+ { PE_SRC_TRANSITION_SUPPLY, PE_SRC_TRANSITION_SUPPLY2 },
+};
+
+DECL_PE_STATE_REACTION(PD_HW_VBUS_STABLE);
+
+/* PE Event reactions */
+
+/* TODO: Remove it later, always triggered by pd_evt_source_start_timeout */
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_RESET_PRL_COMPLETED) = {
+ { PE_SRC_STARTUP, PE_SRC_SEND_CAPABILITIES },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_RESET_PRL_COMPLETED);
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_POWER_ROLE_AT_DEFAULT) = {
+ { PE_SRC_TRANSITION_TO_DEFAULT, PE_SRC_STARTUP },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_POWER_ROLE_AT_DEFAULT);
+
+DECL_PE_STATE_TRANSITION(PD_PE_MSG_IDLE) = {
+ { PE_IDLE1, PE_IDLE2 },
+};
+
+DECL_PE_STATE_REACTION(PD_PE_MSG_IDLE);
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SENDER_RESPONSE) = {
+ { PE_SRC_SEND_CAPABILITIES, PE_SRC_HARD_RESET },
+ { PE_SRC_SEND_SOFT_RESET, PE_SRC_HARD_RESET },
+
+ { PE_SRC_GET_SINK_CAP, PE_SRC_READY },
+ { PE_DR_SRC_GET_SOURCE_CAP, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SENDER_RESPONSE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_PS_HARD_RESET) = {
+ { PE_SRC_HARD_RESET, PE_SRC_TRANSITION_TO_DEFAULT },
+ { PE_SRC_HARD_RESET_RECEIVED, PE_SRC_TRANSITION_TO_DEFAULT },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_PS_HARD_RESET);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_BIST_CONT_MODE) = {
+ { PE_BIST_CARRIER_MODE_2, PE_SRC_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_BIST_CONT_MODE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SOURCE_START) = {
+ { PE_SRC_STARTUP, PE_SRC_SEND_CAPABILITIES },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SOURCE_START);
+
+/*
+ * [BLOCK] Process Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SRC_SEND_SOFT_RESET:
+ case PE_SRC_GET_SINK_CAP:
+ case PE_DR_SRC_GET_SOURCE_CAP:
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ return false;
+
+ case PE_SRC_SEND_CAPABILITIES:
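+ /* Source_Capabilities delivered: reset counters and wait for a Request */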
+ pd_disable_timer(pd_port, PD_TIMER_NO_RESPONSE);
+ pd_port->cap_counter = 0;
+ pd_port->hard_reset_counter = 0;
+ pd_notify_pe_hard_reset_completed(pd_port);
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ /* pd_set_cc_res(pd_port, TYPEC_CC_RP_1_5); */
+ return false;
+
+ case PE_SRC_CAPABILITY_RESPONSE:
+ if (!pd_port->explicit_contract)
+ PE_TRANSIT_STATE(pd_port, PE_SRC_WAIT_NEW_CAPABILITIES);
+ else if (pd_port->invalid_contract)
+ PE_TRANSIT_STATE(pd_port, PE_SRC_HARD_RESET);
+ else
+ PE_TRANSIT_STATE(pd_port, PE_SRC_READY);
+ return true;
+ default:
+ return PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GOOD_CRC);
+ }
+}
+
+static inline bool pd_process_ctrl_msg_get_sink_cap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SRC_READY:
+ case PE_DR_SRC_GET_SOURCE_CAP:
+ case PE_SRC_GET_SINK_CAP:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (pd_port->dpm_caps & DPM_CAP_LOCAL_DR_POWER) {
+ PE_TRANSIT_STATE(pd_port, PE_DR_SRC_GIVE_SINK_CAP);
+ return true;
+ }
+ pd_send_ctrl_msg(pd_port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ return false;
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ case PD_CTRL_ACCEPT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_ACCEPT);
+ break;
+
+ case PD_CTRL_REJECT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_REJECT);
+ break;
+
+ case PD_CTRL_GET_SOURCE_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_GET_SOURCE_CAP);
+ break;
+
+ case PD_CTRL_GET_SINK_CAP:
+ ret = pd_process_ctrl_msg_get_sink_cap(pd_port, pd_event);
+ break;
+
+ /* Swap */
+ case PD_CTRL_DR_SWAP:
+ ret = pd_process_ctrl_msg_dr_swap(pd_port, pd_event);
+ break;
+
+ case PD_CTRL_PR_SWAP:
+ ret = pd_process_ctrl_msg_pr_swap(pd_port, pd_event);
+ break;
+
+ case PD_CTRL_VCONN_SWAP:
+ ret = pd_process_ctrl_msg_vconn_swap(pd_port, pd_event);
+ break;
+
+ /* SoftReset */
+ case PD_CTRL_SOFT_RESET:
+ if (!pd_port->during_swap) {
+ PE_TRANSIT_STATE(pd_port, PE_SRC_SOFT_RESET);
+ return true;
+ }
+ break;
+
+ /* Ignore */
+ case PD_CTRL_PING:
+ pd_notify_pe_recv_ping_event(pd_port);
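+ /* fall through */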
+ case PD_CTRL_PS_RDY:
+ case PD_CTRL_GOTO_MIN:
+ case PD_CTRL_WAIT:
+ break;
+ }
+
+ if (!ret)
+ ret = pd_process_protocol_error(pd_port, pd_event);
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Data MSG
+ */
+
+static inline bool pd_process_data_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DATA_SOURCE_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DATA_MSG_SOURCE_CAP);
+ break;
+
+ case PD_DATA_SINK_CAP:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DATA_MSG_SINK_CAP);
+ break;
+
+ case PD_DATA_BIST:
+ ret = pd_process_data_msg_bist(pd_port, pd_event);
+ break;
+
+ case PD_DATA_REQUEST:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DATA_MSG_REQUEST);
+ break;
+
+ case PD_DATA_VENDOR_DEF:
+ return false;
+ }
+
+ if (!ret)
+ ret = pd_process_protocol_error(pd_port, pd_event);
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_ACK);
+ break;
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+ case PD_DPM_CAP_CHANGED:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_CAP_CHANGED);
+ break;
+
+ case PD_DPM_ERROR_RECOVERY:
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ return true;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process HW MSG
+ */
+
+static inline bool pd_process_hw_msg_vbus_present(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_SRC_STARTUP:
+ pd_enable_timer(pd_port, PD_TIMER_SOURCE_START);
+ break;
+
+ case PE_SRC_TRANSITION_TO_DEFAULT:
+ pd_put_pe_event(pd_port, PD_PE_POWER_ROLE_AT_DEFAULT);
+ break;
+ }
+
+ return false;
+}
+
+static inline bool pd_process_hw_msg_tx_failed(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr == PE_SRC_SEND_CAPABILITIES) {
+ if (pd_port->pd_connected) {
+ PE_DBG("PR_SWAP NoResp\r\n");
+ return false;
+ }
+
+ PE_TRANSIT_STATE(pd_port, PE_SRC_DISCOVERY);
+ return true;
+ }
+
+ return PE_MAKE_STATE_TRANSIT_FORCE(
+ PD_HW_MSG_TX_FAILED, PE_SRC_SEND_SOFT_RESET);
+}
+
+static inline bool pd_process_hw_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_HW_CC_DETACHED:
+ PE_TRANSIT_STATE(pd_port, PE_IDLE1);
+ return true;
+
+ case PD_HW_CC_ATTACHED:
+ PE_TRANSIT_STATE(pd_port, PE_SRC_STARTUP);
+ return true;
+
+ case PD_HW_RECV_HARD_RESET:
+ ret = pd_process_recv_hard_reset(
+ pd_port, pd_event, PE_SRC_HARD_RESET_RECEIVED);
+ break;
+
+ case PD_HW_VBUS_PRESENT:
+ ret = pd_process_hw_msg_vbus_present(pd_port, pd_event);
+ break;
+
+ case PD_HW_VBUS_SAFE0V:
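+ /* VBUS reached vSafe0V: arm the source recover timer before re-sourcing VBUS */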
+ pd_enable_timer(pd_port, PD_TIMER_SRC_RECOVER);
+ break;
+
+ case PD_HW_VBUS_STABLE:
+ ret = PE_MAKE_STATE_TRANSIT(PD_HW_VBUS_STABLE);
+ break;
+
+ case PD_HW_TX_FAILED:
+ ret = pd_process_hw_msg_tx_failed(pd_port, pd_event);
+ break;
+
+ case PD_HW_VBUS_ABSENT:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process PE MSG
+ */
+
+static inline bool pd_process_pe_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_PE_RESET_PRL_COMPLETED:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_RESET_PRL_COMPLETED);
+ break;
+
+ case PD_PE_POWER_ROLE_AT_DEFAULT:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_POWER_ROLE_AT_DEFAULT);
+ break;
+
+ case PD_PE_IDLE:
+ ret = PE_MAKE_STATE_TRANSIT(PD_PE_MSG_IDLE);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+static inline bool pd_process_timer_msg_source_start(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ if (pd_is_auto_discover_cable_id(pd_port)) {
+ if (vdm_put_dpm_discover_cable_event(pd_port)) {
+ /* waiting for dpm_ack event */
+ return false;
+ }
+ }
+#endif
+
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_SOURCE_START);
+}
+
+static inline bool pd_process_timer_msg_source_cap(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr != PE_SRC_DISCOVERY)
+ return false;
+
+ if (pd_port->cap_counter <= PD_CAPS_COUNT)
+ PE_TRANSIT_STATE(pd_port, PE_SRC_SEND_CAPABILITIES);
+ else /* in this state, PD has never been connected */
+ PE_TRANSIT_STATE(pd_port, PE_SRC_DISABLED);
+
+ return true;
+}
+
+static inline bool pd_process_timer_msg_no_response(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->hard_reset_counter <= PD_HARD_RESET_COUNT)
+ PE_TRANSIT_STATE(pd_port, PE_SRC_HARD_RESET);
+ else if (pd_port->pd_prev_connected)
+ PE_TRANSIT_STATE(pd_port, PE_ERROR_RECOVERY);
+ else
+ PE_TRANSIT_STATE(pd_port, PE_SRC_DISABLED);
+
+ return true;
+}
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_TIMER_BIST_CONT_MODE:
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_BIST_CONT_MODE);
+
+ case PD_TIMER_SOURCE_CAPABILITY:
+ return pd_process_timer_msg_source_cap(pd_port, pd_event);
+
+#ifndef CONFIG_USB_PD_DBG_IGRONE_TIMEOUT
+ case PD_TIMER_SENDER_RESPONSE:
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_SENDER_RESPONSE);
+#endif
+
+ case PD_TIMER_PS_HARD_RESET:
+ return PE_MAKE_STATE_TRANSIT(PD_TIMER_PS_HARD_RESET);
+
+ case PD_TIMER_SOURCE_START:
+ return pd_process_timer_msg_source_start(pd_port, pd_event);
+
+#ifndef CONFIG_USB_PD_DBG_IGRONE_TIMEOUT
+ case PD_TIMER_NO_RESPONSE:
+ return pd_process_timer_msg_no_response(pd_port, pd_event);
+#endif
+
+ case PD_TIMER_SOURCE_TRANSITION:
+ if (pd_port->state_machine != PE_STATE_MACHINE_PR_SWAP)
+ pd_dpm_src_transition_power(pd_port, pd_event);
+ break;
+
+#ifdef CONFIG_PD_DISCOVER_CABLE_ID
+ case PD_TIMER_DISCOVER_ID:
+ vdm_put_dpm_discover_cable_event(pd_port);
+ break;
+#endif
+
+ case PD_TIMER_SRC_RECOVER:
+ pd_dpm_source_vbus(pd_port, true);
+ pd_enable_vbus_valid_detection(pd_port, true);
+ break;
+ }
+
+ return false;
+}
+
+/*
+ * [BLOCK] Process Policy Engine's SRC Message
+ */
+
+bool pd_process_event_src(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DATA_MSG:
+ return pd_process_data_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_HW_MSG:
+ return pd_process_hw_msg(pd_port, pd_event);
+
+ case PD_EVT_PE_MSG:
+ return pd_process_pe_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+
+ default:
+ return false;
+ }
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_vcs.c b/drivers/usb/pd/richtek/pd_process_evt_vcs.c
new file mode 100644
index 000000000000..a9a91ad74e0f
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_vcs.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For VCS
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+
+/* PD Control MSG reactions */
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_REJECT_WAIT) = {
+ { PE_VCS_SEND_SWAP, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_REJECT_WAIT);
+
+DECL_PE_STATE_TRANSITION(PD_CTRL_MSG_PS_RDY) = {
+ { PE_VCS_WAIT_FOR_VCONN, PE_VCS_TURN_OFF_VCONN },
+};
+
+DECL_PE_STATE_REACTION(PD_CTRL_MSG_PS_RDY);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_VCS_EVALUATE_SWAP, PE_VCS_ACCEPT_SWAP },
+ { PE_VCS_TURN_ON_VCONN, PE_VCS_SEND_PS_RDY },
+ { PE_VCS_TURN_OFF_VCONN, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+ { PE_VCS_EVALUATE_SWAP, PE_VCS_REJECT_VCONN_SWAP },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_SENDER_RESPONSE) = {
+ { PE_VCS_SEND_SWAP, PE_VIRT_READY },
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_SENDER_RESPONSE);
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_VCONN_ON) = {
+ { PE_VCS_WAIT_FOR_VCONN, PE_VIRT_HARD_RESET},
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_VCONN_ON);
+
+/*
+ * [BLOCK] Process PD Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_VCS_REJECT_VCONN_SWAP:
+ case PE_VCS_SEND_PS_RDY:
+ PE_TRANSIT_READY_STATE(pd_port);
+ return true;
+
+ case PE_VCS_ACCEPT_SWAP:
+ PE_TRANSIT_VCS_SWAP_STATE(pd_port);
+ return true;
+
+ case PE_VCS_SEND_SWAP:
+ pd_enable_timer(pd_port, PD_TIMER_SENDER_RESPONSE);
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+static inline bool pd_process_ctrl_msg_accept(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->pe_state_curr == PE_VCS_SEND_SWAP) {
+ PE_TRANSIT_VCS_SWAP_STATE(pd_port);
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ case PD_CTRL_ACCEPT:
+ return pd_process_ctrl_msg_accept(pd_port, pd_event);
+
+ case PD_CTRL_WAIT:
+ case PD_CTRL_REJECT:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_CTRL_MSG_REJECT_WAIT);
+ break;
+
+ case PD_CTRL_PS_RDY:
+ ret = PE_MAKE_STATE_TRANSIT(PD_CTRL_MSG_PS_RDY);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_DPM_MSG_ACK);
+ break;
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_TIMER_SENDER_RESPONSE:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_TIMER_SENDER_RESPONSE);
+ break;
+
+ case PD_TIMER_VCONN_ON:
+ ret = PE_MAKE_STATE_TRANSIT_VIRT(PD_TIMER_VCONN_ON);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Policy Engine's VCS Message
+ */
+
+bool pd_process_event_vcs(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+
+ default:
+ return false;
+ }
+}
diff --git a/drivers/usb/pd/richtek/pd_process_evt_vdm.c b/drivers/usb/pd/richtek/pd_process_evt_vdm.c
new file mode 100644
index 000000000000..e3216fcbe231
--- /dev/null
+++ b/drivers/usb/pd/richtek/pd_process_evt_vdm.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Process Event For VDM
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/pd_process_evt.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+
+#define VDM_CMD_STATE(cmd, cmd_type) \
+ (((cmd) & 0x1f) | (((cmd_type) & 0x03) << 6))
+
+#define VDM_CMD_INIT_STATE(cmd, next_state) \
+ { VDM_CMD_STATE(cmd, CMDT_INIT), next_state }
+
+#define VDM_CMD_ACK_STATE(cmd, next_state) \
+ { VDM_CMD_STATE(cmd, CMDT_RSP_ACK), next_state }
+
+#define VDM_CMD_NACK_STATE(cmd, next_state) \
+ { VDM_CMD_STATE(cmd, CMDT_RSP_NAK), next_state }
+
+#define VDM_CMD_BUSY_STATE(cmd, next_state) \
+ { VDM_CMD_STATE(cmd, CMDT_RSP_BUSY), next_state }
+
+/* UFP PD VDM Command's reactions */
+
+DECL_PE_STATE_TRANSITION(PD_UFP_VDM_CMD) = {
+ VDM_CMD_INIT_STATE(CMD_DISCOVER_IDENT, PE_UFP_VDM_GET_IDENTITY),
+ VDM_CMD_INIT_STATE(CMD_DISCOVER_SVID, PE_UFP_VDM_GET_SVIDS),
+ VDM_CMD_INIT_STATE(CMD_DISCOVER_MODES, PE_UFP_VDM_GET_MODES),
+ VDM_CMD_INIT_STATE(CMD_ENTER_MODE, PE_UFP_VDM_EVALUATE_MODE_ENTRY),
+ VDM_CMD_INIT_STATE(CMD_EXIT_MODE, PE_UFP_VDM_MODE_EXIT),
+ /* CHECK IT LATER */
+ VDM_CMD_INIT_STATE(CMD_ATTENTION, PE_UFP_VDM_ATTENTION_REQUEST),
+};
+
+DECL_PE_STATE_REACTION(PD_UFP_VDM_CMD);
+
+/* DFP PD VDM Command's reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_DISCOVER_ID) = {
+ VDM_CMD_ACK_STATE(CMD_DISCOVER_IDENT,
+ PE_DFP_UFP_VDM_IDENTITY_ACKED),
+ VDM_CMD_NACK_STATE(CMD_DISCOVER_IDENT, PE_DFP_UFP_VDM_IDENTITY_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_DISCOVER_IDENT, PE_DFP_UFP_VDM_IDENTITY_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_DISCOVER_ID);
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_DISCOVER_SVID) = {
+ VDM_CMD_ACK_STATE(CMD_DISCOVER_SVID,
+ PE_DFP_VDM_SVIDS_ACKED),
+ VDM_CMD_NACK_STATE(CMD_DISCOVER_SVID, PE_DFP_VDM_SVIDS_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_DISCOVER_SVID, PE_DFP_VDM_SVIDS_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_DISCOVER_SVID);
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_DISCOVER_MODES) = {
+ VDM_CMD_ACK_STATE(CMD_DISCOVER_MODES,
+ PE_DFP_VDM_MODES_ACKED),
+ VDM_CMD_NACK_STATE(CMD_DISCOVER_MODES, PE_DFP_VDM_MODES_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_DISCOVER_MODES, PE_DFP_VDM_MODES_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_DISCOVER_MODES);
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_ENTER_MODE) = {
+ VDM_CMD_ACK_STATE(CMD_ENTER_MODE,
+ PE_DFP_VDM_MODE_ENTRY_ACKED),
+ VDM_CMD_NACK_STATE(CMD_ENTER_MODE, PE_DFP_VDM_MODE_ENTRY_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_ENTER_MODE, PE_DFP_VDM_MODE_ENTRY_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_ENTER_MODE);
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_EXIT_MODE) = {
+ VDM_CMD_ACK_STATE(CMD_EXIT_MODE,
+ PE_DFP_VDM_MODE_ENTRY_ACKED),
+ VDM_CMD_NACK_STATE(CMD_EXIT_MODE,
+ PE_DFP_VDM_MODE_ENTRY_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_EXIT_MODE,
+ PE_VIRT_HARD_RESET),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_EXIT_MODE);
+
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_ATTENTION) = {
+ VDM_CMD_INIT_STATE(CMD_ATTENTION,
+ PE_DFP_VDM_ATTENTION_REQUEST),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_ATTENTION);
+/* HW Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_HW_MSG_TX_FAILED) = {
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ {PE_SRC_VDM_IDENTITY_REQUEST, PE_SRC_VDM_IDENTITY_NAKED},
+#endif /* PD_CAP_SRC_STARTUP_DISCOVERY_ID */
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ {PE_DFP_CBL_VDM_IDENTITY_REQUEST, PE_DFP_CBL_VDM_IDENTITY_NAKED},
+#endif
+};
+
+DECL_PE_STATE_REACTION(PD_HW_MSG_TX_FAILED);
+
+/* DPM Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_ACK) = {
+ { PE_UFP_VDM_GET_IDENTITY, PE_UFP_VDM_SEND_IDENTITY },
+ { PE_UFP_VDM_GET_SVIDS, PE_UFP_VDM_SEND_SVIDS },
+ { PE_UFP_VDM_GET_MODES, PE_UFP_VDM_SEND_MODES },
+ { PE_UFP_VDM_MODE_EXIT, PE_UFP_VDM_MODE_EXIT_ACK},
+ { PE_UFP_VDM_EVALUATE_MODE_ENTRY, PE_UFP_VDM_MODE_ENTRY_ACK },
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_ACK);
+
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_NAK) = {
+ {PE_UFP_VDM_GET_IDENTITY, PE_UFP_VDM_GET_IDENTITY_NAK},
+ {PE_UFP_VDM_GET_SVIDS, PE_UFP_VDM_GET_SVIDS_NAK},
+ {PE_UFP_VDM_GET_MODES, PE_UFP_VDM_GET_MODES_NAK},
+ {PE_UFP_VDM_MODE_EXIT, PE_UFP_VDM_MODE_EXIT_NAK},
+ {PE_UFP_VDM_EVALUATE_MODE_ENTRY, PE_UFP_VDM_MODE_ENTRY_NAK},
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_NAK);
+
+/* Discover Cable ID */
+
+#ifdef CONFIG_PD_DISCOVER_CABLE_ID
+DECL_PE_STATE_TRANSITION(PD_DPM_MSG_DISCOVER_CABLE) = {
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ { PE_SRC_STARTUP, PE_SRC_VDM_IDENTITY_REQUEST},
+ { PE_SRC_DISCOVERY, PE_SRC_VDM_IDENTITY_REQUEST},
+#endif
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ { PE_SRC_READY, PE_DFP_CBL_VDM_IDENTITY_REQUEST},
+ { PE_SNK_READY, PE_DFP_CBL_VDM_IDENTITY_REQUEST},
+#endif
+};
+
+DECL_PE_STATE_REACTION(PD_DPM_MSG_DISCOVER_CABLE);
+#endif
+
+/* Source Startup Discover Cable ID */
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+DECL_PE_STATE_TRANSITION(PD_SRC_VDM_DISCOVER_CABLE) = {
+ VDM_CMD_ACK_STATE(CMD_DISCOVER_IDENT, PE_SRC_VDM_IDENTITY_ACKED),
+ VDM_CMD_NACK_STATE(CMD_DISCOVER_IDENT, PE_SRC_VDM_IDENTITY_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_DISCOVER_IDENT, PE_SRC_VDM_IDENTITY_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_SRC_VDM_DISCOVER_CABLE);
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+DECL_PE_STATE_TRANSITION(PD_DFP_VDM_DISCOVER_CABLE) = {
+ VDM_CMD_ACK_STATE(CMD_DISCOVER_IDENT, PE_DFP_CBL_VDM_IDENTITY_ACKED),
+ VDM_CMD_NACK_STATE(CMD_DISCOVER_IDENT, PE_DFP_CBL_VDM_IDENTITY_NAKED),
+ VDM_CMD_BUSY_STATE(CMD_DISCOVER_IDENT, PE_DFP_CBL_VDM_IDENTITY_NAKED),
+};
+
+DECL_PE_STATE_REACTION(PD_DFP_VDM_DISCOVER_CABLE);
+
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+
+/* Timer Event reactions */
+
+DECL_PE_STATE_TRANSITION(PD_TIMER_VDM_RESPONSE) = {
+ { PE_DFP_UFP_VDM_IDENTITY_REQUEST, PE_DFP_UFP_VDM_IDENTITY_NAKED },
+ { PE_DFP_VDM_SVIDS_REQUEST, PE_DFP_VDM_SVIDS_NAKED },
+ { PE_DFP_VDM_MODES_REQUEST, PE_DFP_VDM_MODES_NAKED },
+ { PE_DFP_VDM_MODE_EXIT_REQUEST, PE_VIRT_HARD_RESET },
+ { PE_DFP_VDM_MODE_ENTRY_REQUEST, PE_DFP_VDM_MODE_ENTRY_NAKED },
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ { PE_SRC_VDM_IDENTITY_REQUEST, PE_SRC_VDM_IDENTITY_NAKED },
+#endif
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ { PE_DFP_CBL_VDM_IDENTITY_REQUEST, PE_DFP_CBL_VDM_IDENTITY_NAKED },
+#endif
+};
+
+DECL_PE_STATE_REACTION(PD_TIMER_VDM_RESPONSE);
+
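+/*
+ * The DECL_PE_STATE_TRANSITION tables above list either {current_state,
+ * next_state} pairs or VDM_CMD_*_STATE entries keyed by VDM command and
+ * command type; DECL_PE_STATE_REACTION presumably records each table and its
+ * size so the PE_MAKE_(VDM_CMD_)STATE_TRANSIT macros below can match the
+ * current policy-engine state and apply the corresponding transition.
+ */
+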
+/*
+ * [BLOCK] Process Ctrl MSG
+ */
+
+static inline bool pd_process_ctrl_msg_good_crc(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+ case PE_UFP_VDM_SEND_IDENTITY:
+ case PE_UFP_VDM_GET_IDENTITY_NAK:
+ case PE_UFP_VDM_SEND_SVIDS:
+ case PE_UFP_VDM_GET_SVIDS_NAK:
+
+ case PE_UFP_VDM_SEND_MODES:
+ case PE_UFP_VDM_GET_MODES_NAK:
+ case PE_UFP_VDM_MODE_ENTRY_ACK:
+ case PE_UFP_VDM_MODE_ENTRY_NAK:
+ case PE_UFP_VDM_MODE_EXIT_ACK:
+ case PE_UFP_VDM_MODE_EXIT_NAK:
+
+ PE_TRANSIT_READY_STATE(pd_port);
+ return true;
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ case PE_SRC_VDM_IDENTITY_REQUEST:
+ pd_port->power_cable_present = true;
+ return false;
+#endif
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ case PE_DFP_CBL_VDM_IDENTITY_REQUEST:
+ pd_port->power_cable_present = true;
+ return false;
+#endif
+ }
+
+ return false;
+}
+
+static inline bool pd_process_ctrl_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_CTRL_GOOD_CRC:
+ return pd_process_ctrl_msg_good_crc(pd_port, pd_event);
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static inline bool pd_process_uvdm(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ return false;
+}
+
+/*
+ * [BLOCK] Process Data MSG (VDM)
+ */
+
+#if (PE_EVT_INFO_VDM_DIS == 0)
+static const char * const pe_vdm_cmd_name[] = {
+ "DiscoverID",
+ "DiscoverSVID",
+ "DiscoverMode",
+ "EnterMode",
+ "ExitMode",
+ "Attention",
+};
+
+static const char *const pe_vdm_dp_cmd_name[] = {
+ "DPStatus",
+ "DPConfig",
+};
+
+static const char * const pe_vdm_cmd_type_name[] = {
+ "INIT",
+ "ACK",
+ "NACK",
+ "BUSY",
+};
+#endif /* if (PE_EVT_INFO_VDM_DIS == 0) */
+
+static inline void print_vdm_msg(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+#if (PE_EVT_INFO_VDM_DIS == 0)
+ u8 cmd;
+ u8 cmd_type;
+ const char *name = NULL;
+ u32 vdm_hdr = pd_event->pd_msg->payload[0];
+
+ cmd = PD_VDO_CMD(vdm_hdr);
+ cmd_type = PD_VDO_CMDT(vdm_hdr);
+
+ /* SVDM commands are 1-based; guard against cmd == 0 underflowing the table */
+ if (cmd >= 1 && cmd <= ARRAY_SIZE(pe_vdm_cmd_name))
+ name = pe_vdm_cmd_name[cmd - 1];
+ if (!name)
+ return;
+
+ if (cmd_type >= ARRAY_SIZE(pe_vdm_cmd_type_name))
+ return;
+
+ PE_DBG("%s:%s\r\n", name, pe_vdm_cmd_type_name[cmd_type]);
+
+#endif /* PE_EVT_INFO_VDM_DIS */
+}
+
+static inline bool pd_process_ufp_vdm(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (!pd_check_pe_state_ready(pd_port)) {
+ PE_DBG("659 : invalid, current status\r\n");
+ return false;
+ }
+
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_UFP_VDM_CMD))
+ return true;
+
+ return false;
+}
+
+static inline bool pd_process_dfp_vdm(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ u32 vdm_hdr = pd_event->pd_msg->payload[0];
+
+ if ((PD_VDO_CMDT(vdm_hdr) == CMDT_INIT) &&
+ PD_VDO_CMD(vdm_hdr) == CMD_ATTENTION) {
+ if (!pd_check_pe_state_ready(pd_port)) {
+ PE_DBG("670 : invalid, current status\r\n");
+ return false;
+ }
+
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_ATTENTION))
+ return true;
+ }
+
+ switch (pd_port->pe_state_curr) {
+ case PE_DFP_UFP_VDM_IDENTITY_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_DISCOVER_ID))
+ return true;
+
+ case PE_DFP_VDM_SVIDS_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_DISCOVER_SVID))
+ return true;
+
+ case PE_DFP_VDM_MODES_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_DISCOVER_MODES))
+ return true;
+
+ case PE_DFP_VDM_MODE_ENTRY_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_ENTER_MODE))
+ return true;
+
+ case PE_DFP_VDM_MODE_EXIT_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT_VIRT(PD_DFP_VDM_EXIT_MODE))
+ return true;
+ }
+ return false;
+}
+
+static inline bool pd_process_sop_vdm(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ if (pd_port->data_role == PD_ROLE_UFP)
+ ret = pd_process_ufp_vdm(pd_port, pd_event);
+ else
+ ret = pd_process_dfp_vdm(pd_port, pd_event);
+
+ if (!ret)
+ PE_DBG("Unknown VDM\r\n");
+ return ret;
+}
+
+static inline bool pd_process_sop_prime_vdm(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_port->pe_state_curr) {
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ case PE_SRC_VDM_IDENTITY_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_SRC_VDM_DISCOVER_CABLE))
+ return true;
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ case PE_DFP_CBL_VDM_IDENTITY_REQUEST:
+ if (PE_MAKE_VDM_CMD_STATE_TRANSIT(PD_DFP_VDM_DISCOVER_CABLE))
+ return true;
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+ }
+ return false;
+}
+
+static inline bool pd_process_data_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+ u32 vdm_hdr;
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ if (pd_event->msg != PD_DATA_VENDOR_DEF)
+ return ret;
+
+ vdm_hdr = pd_msg->payload[0];
+ if (!PD_VDO_SVDM(vdm_hdr))
+ return pd_process_uvdm(pd_port, pd_event);
+
+ /* From Port Partner, copy curr_state from pd_state */
+ if (PD_VDO_CMDT(vdm_hdr) == CMDT_INIT) {
+ pd_port->pe_vdm_state = pd_port->pe_pd_state;
+ pd_port->pe_state_curr = pd_port->pe_pd_state;
+#if PE_DBG_RESET_VDM_DIS == 0
+ PE_DBG("reset vdm_state\r\n");
+#endif /* if PE_DBG_RESET_VDM_DIS == 0 */
+ }
+
+ print_vdm_msg(pd_port, pd_event);
+
+ if (pd_msg->frame_type == TCPC_TX_SOP_PRIME)
+ ret = pd_process_sop_prime_vdm(pd_port, pd_event);
+ else
+ ret = pd_process_sop_vdm(pd_port, pd_event);
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process DPM MSG
+ */
+
+static inline bool pd_process_dpm_msg_ack(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ if (pd_port->data_role == PD_ROLE_DFP) {
+ switch (pd_port->pe_state_curr) {
+ case PE_DFP_UFP_VDM_IDENTITY_ACKED:
+ case PE_DFP_UFP_VDM_IDENTITY_NAKED:
+ case PE_DFP_CBL_VDM_IDENTITY_ACKED:
+ case PE_DFP_CBL_VDM_IDENTITY_NAKED:
+ case PE_DFP_VDM_SVIDS_ACKED:
+ case PE_DFP_VDM_SVIDS_NAKED:
+ case PE_DFP_VDM_MODES_ACKED:
+ case PE_DFP_VDM_MODES_NAKED:
+ case PE_DFP_VDM_MODE_ENTRY_ACKED:
+ case PE_DFP_VDM_MODE_EXIT_REQUEST:
+ case PE_DFP_VDM_MODE_EXIT_ACKED:
+ case PE_DFP_VDM_ATTENTION_REQUEST:
+ PE_TRANSIT_READY_STATE(pd_port);
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_ACK);
+ }
+}
+
+static inline bool pd_process_dpm_msg_vdm_request(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool is_dfp;
+ bool is_attention;
+
+ if (!pd_check_pe_state_ready(pd_port)) {
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_NOT_READY);
+ PE_DBG("skip vdm_request, not ready_state (%d)\r\n",
+ pd_port->pe_state_curr);
+ return false;
+ }
+
+ is_dfp = pd_port->data_role == PD_ROLE_DFP;
+ is_attention = pd_event->msg_sec == PD_DPM_VDM_REQUEST_ATTENTION;
+
+ if ((is_dfp && is_attention) || (!is_dfp && !is_attention)) {
+ pd_update_dpm_request_state(pd_port, DPM_REQ_ERR_WRONG_ROLE);
+ PE_DBG("skip vdm_request, not dfp\r\n");
+ return false;
+ }
+
+ PE_TRANSIT_STATE(pd_port, pd_event->msg_sec);
+ return true;
+}
+
+static inline bool pd_process_dpm_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_DPM_ACK:
+ ret = pd_process_dpm_msg_ack(pd_port, pd_event);
+ break;
+
+ case PD_DPM_NAK:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_NAK);
+ break;
+
+ case PD_DPM_VDM_REQUEST:
+ ret = pd_process_dpm_msg_vdm_request(pd_port, pd_event);
+ break;
+
+#ifdef CONFIG_PD_DISCOVER_CABLE_ID
+ case PD_DPM_DISCOVER_CABLE_ID:
+ ret = PE_MAKE_STATE_TRANSIT(PD_DPM_MSG_DISCOVER_CABLE);
+ break;
+#endif
+ }
+ return ret;
+}
+
+/*
+ * [BLOCK] Process HW MSG
+ */
+
+static inline bool pd_process_hw_msg_retry_vdm(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ PE_DBG("RetryVDM\r\n");
+ return pd_process_sop_vdm(pd_port, pd_event);
+}
+
+static inline bool pd_process_hw_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ bool ret = false;
+
+ switch (pd_event->msg) {
+ case PD_HW_TX_FAILED:
+ ret = PE_MAKE_STATE_TRANSIT(PD_HW_MSG_TX_FAILED);
+ break;
+
+ case PD_HW_RETRY_VDM:
+ ret = pd_process_hw_msg_retry_vdm(pd_port, pd_event);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Process Timer MSG
+ */
+
+static inline bool pd_process_timer_msg(
+ pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->msg) {
+ case PD_TIMER_VDM_RESPONSE:
+ return PE_MAKE_STATE_TRANSIT_VIRT(PD_TIMER_VDM_RESPONSE);
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * [BLOCK] Process Policy Engine's VDM Message
+ */
+
+bool pd_process_event_vdm(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ switch (pd_event->event_type) {
+ case PD_EVT_CTRL_MSG:
+ return pd_process_ctrl_msg(pd_port, pd_event);
+
+ case PD_EVT_DATA_MSG:
+ return pd_process_data_msg(pd_port, pd_event);
+
+ case PD_EVT_DPM_MSG:
+ return pd_process_dpm_msg(pd_port, pd_event);
+
+ case PD_EVT_HW_MSG:
+ return pd_process_hw_msg(pd_port, pd_event);
+
+ case PD_EVT_TIMER_MSG:
+ return pd_process_timer_msg(pd_port, pd_event);
+ }
+
+ return false;
+}
diff --git a/drivers/usb/pd/richtek/rt-regmap.c b/drivers/usb/pd/richtek/rt-regmap.c
new file mode 100644
index 000000000000..db396dce5889
--- /dev/null
+++ b/drivers/usb/pd/richtek/rt-regmap.c
@@ -0,0 +1,2129 @@
+/* drivers/usb/pd/richtek/rt-regmap.c
+ * Richtek regmap with debugfs Driver
+ *
+ * Copyright (C) 2014 Richtek Technology Corp.
+ * Author: Jeff Chang <jeff_chang@richtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/semaphore.h>
+
+#include <linux/hisi/usb/pd/richtek/rt-regmap.h>
+
+struct rt_regmap_ops {
+ int (*regmap_block_write)(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *data);
+ int (*regmap_block_read)(struct rt_regmap_device *rd, u32 reg,
+ int bytes, void *dest);
+};
+
+enum {
+ RT_DBG_REG,
+ RT_DBG_DATA,
+ RT_DBG_REGS,
+ RT_DBG_SYNC,
+ RT_DBG_ERROR,
+ RT_DBG_NAME,
+ RT_DBG_BLOCK,
+ RT_DBG_SIZE,
+ RT_DBG_SLAVE_ADDR,
+ RT_SUPPORT_MODE,
+ RT_DBG_IO_LOG,
+ RT_DBG_CACHE_MODE,
+ RT_DBG_REG_SIZE,
+};
+
+struct reg_index_offset {
+ int index;
+ int offset;
+};
+
+struct rt_debug_data {
+ struct reg_index_offset rio;
+ unsigned int reg_addr;
+ unsigned int reg_size;
+ unsigned char part_id;
+};
+
+/* rt_regmap_device
+ *
+ * Richtek regmap device. One for each rt_regmap.
+ *
+ */
+struct rt_regmap_device {
+ struct rt_regmap_properties props;
+ struct rt_regmap_fops *rops;
+ struct rt_regmap_ops regmap_ops;
+ struct device dev;
+ void *client;
+ struct semaphore semaphore;
+ struct dentry *rt_den;
+ struct dentry *rt_debug_file[13];
+ struct rt_debug_st rtdbg_st[13];
+ struct dentry **rt_reg_file;
+ struct rt_debug_st **reg_st;
+ struct rt_debug_data dbg_data;
+ struct delayed_work rt_work;
+ unsigned char *cache_flag;
+ unsigned char part_size_limit;
+ unsigned char *alloc_data;
+ char *err_msg;
+
+ int (*rt_block_write[4])(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count);
+ unsigned char cache_inited:1;
+ unsigned char error_occurred:1;
+ unsigned char pending_event:1;
+};
+
+struct dentry *rt_regmap_dir;
+
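+/* get_parameters - parse up to num_of_par space-separated integers from buf
+ * into param1[]; tokens prefixed with "0x"/"0X" are taken as hex, everything
+ * else as decimal. Returns 0 on success or -EINVAL on a malformed token.
+ */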
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+ char *token;
+ int base, cnt;
+
+ token = strsep(&buf, " ");
+
+ for (cnt = 0; cnt < num_of_par; cnt++) {
+ if (token) {
+ if ((token[1] == 'x') || (token[1] == 'X'))
+ base = 16;
+ else
+ base = 10;
+
+ if (kstrtoul(token, base, &param1[cnt]) != 0)
+ return -EINVAL;
+
+ token = strsep(&buf, " ");
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
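+/* get_datas - parse a hex byte string written by the user (expected form
+ * "0xAA,BB,CC...": a leading "0x" followed by two hex digits per byte and one
+ * separator character between bytes) into data_buffer.
+ */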
+static int get_datas(const char *buf, const int length,
+ unsigned char *data_buffer, unsigned char data_length)
+{
+ int i, ptr;
+ long int value;
+ char token[5];
+
+ token[0] = '0';
+ token[1] = 'x';
+ token[4] = 0;
+ if (buf[0] != '0' || buf[1] != 'x')
+ return -EINVAL;
+
+ ptr = 2;
+ for (i = 0; (i < data_length) && (ptr + 2 <= length); i++) {
+ token[2] = buf[ptr++];
+ token[3] = buf[ptr++];
+ ptr++;
+ if (kstrtoul(token, 16, &value) != 0)
+ return -EINVAL;
+ data_buffer[i] = value;
+ }
+ return 0;
+}
+
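+/* find_register_index - locate the rt_register entry that contains @reg and
+ * the byte offset within it (scaled by the matching group's byte mode).
+ * Callers treat a negative index as "address not covered by the map".
+ */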
+static struct reg_index_offset find_register_index(
+ const struct rt_regmap_device *rd, u32 reg)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ int register_num = rd->props.register_num;
+ struct reg_index_offset rio = {0, 0};
+ int index = 0, i = 0, unit = RT_1BYTE_MODE;
+
+ for (index = 0; index < register_num; index++) {
+ if (reg == rm[index]->addr) {
+ rio.index = index;
+ rio.offset = 0;
+ break;
+ } else if (reg > rm[index]->addr) {
+ if ((reg - rm[index]->addr) < rm[index]->size) {
+ rio.index = index;
+ while (&rd->props.group[i]) {
+ if (reg >= rd->props.group[i].start &&
+ reg <= rd->props.group[i].end) {
+ unit =
+ rd->props.group[i].mode;
+ break;
+ }
+ i++;
+ unit = RT_1BYTE_MODE;
+ }
+ rio.offset =
+ (reg - rm[index]->addr) * unit;
+ } else {
+ rio.index = -1;
+ rio.offset = rio.index;
+ }
+ }
+ }
+ return rio;
+}
+
+static int rt_chip_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *src);
+
+/* rt_regmap_cache_sync - sync all cache data to the real chip */
+void rt_regmap_cache_sync(struct rt_regmap_device *rd)
+{
+ int i, rc, num;
+ const rt_register_map_t *rm = rd->props.rm;
+
+ down(&rd->semaphore);
+ if (!rd->pending_event)
+ goto err_cache_sync;
+
+ num = rd->props.register_num;
+ for (i = 0; i < num; i++) {
+ if (*(rd->cache_flag + i) == 1) {
+ rc = rt_chip_block_write(rd, rm[i]->addr,
+ rm[i]->size,
+ rm[i]->cache_data);
+ if (rc < 0) {
+ dev_err(&rd->dev, "rt-regmap sync error\n");
+ goto err_cache_sync;
+ }
+ *(rd->cache_flag + i) = 0;
+ }
+ }
+ rd->pending_event = 0;
+ dev_info(&rd->dev, "regmap sync successfully\n");
+err_cache_sync:
+ up(&rd->semaphore);
+}
+EXPORT_SYMBOL(rt_regmap_cache_sync);
+
+/* rt_regmap_cache_write_back - write current cache data to chip
+ * @rd: rt_regmap_device pointer.
+ * @reg: register map address
+ */
+void rt_regmap_cache_write_back(struct rt_regmap_device *rd, u32 reg)
+{
+ struct reg_index_offset rio;
+ const rt_register_map_t *rm = rd->props.rm;
+ int rc;
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of range\n", reg);
+ return;
+ }
+
+ down(&rd->semaphore);
+ if ((rm[rio.index]->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE) {
+ rc = rt_chip_block_write(rd, rm[rio.index]->addr,
+ rm[rio.index]->size,
+ rm[rio.index]->cache_data);
+ if (rc < 0) {
+ dev_err(&rd->dev, "rt-regmap sync error\n");
+ goto err_cache_chip_write;
+ }
+ *(rd->cache_flag + rio.index) = 0;
+ }
+ dev_info(&rd->dev, "regmap sync successfully\n");
+err_cache_chip_write:
+ up(&rd->semaphore);
+}
+EXPORT_SYMBOL(rt_regmap_cache_write_back);
+
+/* rt_is_reg_volatile - check whether a register map entry is volatile
+ * @rd: rt_regmap_device pointer.
+ * @reg: register map address.
+ */
+int rt_is_reg_volatile(struct rt_regmap_device *rd, u32 reg)
+{
+ struct reg_index_offset rio;
+ rt_register_map_t rm;
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of range\n", reg);
+ return -EINVAL;
+ }
+ rm = rd->props.rm[rio.index];
+
+ return (rm->reg_type & RT_REG_TYPE_MASK) == RT_VOLATILE ? 1 : 0;
+}
+EXPORT_SYMBOL(rt_is_reg_volatile);
+
+/* rt_get_regsize - get the register map size for a specific register
+ * @rd: rt_regmap_device pointer.
+ * @reg: register map address
+ */
+int rt_get_regsize(struct rt_regmap_device *rd, u32 reg)
+{
+ struct reg_index_offset rio;
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0 || rio.offset != 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of map\n", reg);
+ return -EINVAL;
+ }
+ return rd->props.rm[rio.index]->size;
+}
+EXPORT_SYMBOL(rt_get_regsize);
+
+static void rt_work_func(struct work_struct *work)
+{
+ struct rt_regmap_device *rd;
+
+ pr_info(" %s\n", __func__);
+ rd = container_of(work, struct rt_regmap_device, rt_work.work);
+ rt_regmap_cache_sync(rd);
+}
+
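+/* rt_chip_block_write - write @bytes of @src directly to the chip, bypassing
+ * the cache; silently skipped (returns 0) when the current block mode is
+ * RT_IO_BLK_ALL or RT_IO_BLK_CHIP.
+ */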
+static int rt_chip_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *src)
+{
+ int ret;
+
+ if ((rd->props.rt_regmap_mode & RT_IO_BLK_MODE_MASK) == RT_IO_BLK_ALL ||
+ (rd->props.rt_regmap_mode & RT_IO_BLK_MODE_MASK) == RT_IO_BLK_CHIP)
+ return 0;
+
+ ret = rd->rops->write_device(rd->client, reg, bytes, src);
+
+ return ret;
+}
+
+static int rt_chip_block_read(struct rt_regmap_device *rd, u32 reg,
+ int bytes, void *dst)
+{
+ int ret;
+
+ ret = rd->rops->read_device(rd->client, reg, bytes, dst);
+ return ret;
+}
+
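+/* rt_cache_block_write - write-through path of the cached block write: the
+ * buffer is split across consecutive register-map entries, volatile registers
+ * are written straight to the chip, the rest go through the block-mode
+ * handler selected by rt_regmap_mode, and each touched entry's cache flag is
+ * set.
+ */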
+static int rt_cache_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *data)
+{
+ int i, j, reg_base = 0, count = 0, ret = 0, size = 0;
+ struct reg_index_offset rio;
+ unsigned char wdata[64];
+ unsigned char wri_data[128];
+ unsigned char blk_index;
+ rt_register_map_t rm;
+
+ memcpy(wdata, data, bytes);
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of range\n", reg);
+ return -EINVAL;
+ }
+
+ reg_base = 0;
+ rm = rd->props.rm[rio.index + reg_base];
+ while (bytes > 0) {
+ size = ((bytes <= (rm->size - rio.offset)) ?
+ bytes : rm->size - rio.offset);
+ if ((rm->reg_type & RT_REG_TYPE_MASK) == RT_VOLATILE) {
+ ret = rt_chip_block_write(rd,
+ rm->addr + rio.offset,
+ size,
+ &wdata[count]);
+ count += size;
+ } else {
+ blk_index = (rd->props.rt_regmap_mode &
+ RT_IO_BLK_MODE_MASK) >> 3;
+
+ ret = rd->rt_block_write[blk_index]
+ (rd, rm, size, &rio, wdata, &count);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rd->rt_block_write fail\n");
+ goto ERR;
+ }
+ }
+
+ if ((rm->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE)
+ *(rd->cache_flag + rio.index + reg_base) = 1;
+
+ bytes -= size;
+ if (bytes <= 0)
+ goto finished;
+ reg_base++;
+ rio.offset = 0;
+ rm = rd->props.rm[rio.index + reg_base];
+ if ((rio.index + reg_base) >= rd->props.register_num) {
+ dev_err(&rd->dev, "over regmap size\n");
+ goto ERR;
+ }
+ }
+finished:
+ if (rd->props.io_log_en) {
+ j = 0;
+ for (i = 0; i < count; i++)
+ j += sprintf(wri_data + j, "%02x,", wdata[i]);
+ pr_info("RT_REGMAP [WRITE] reg0x%04x [Data] 0x%s\n",
+ reg, wri_data);
+ }
+ return 0;
+ERR:
+ return -EIO;
+}
+
+static int rt_asyn_cache_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *data)
+{
+ int i, j, reg_base, count = 0, ret = 0, size = 0;
+ struct reg_index_offset rio;
+ unsigned char wdata[64];
+ unsigned char wri_data[128];
+ unsigned char blk_index;
+ rt_register_map_t rm;
+
+ memcpy(wdata, data, bytes);
+
+ cancel_delayed_work_sync(&rd->rt_work);
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of range\n", reg);
+ return -EINVAL;
+ }
+
+ reg_base = 0;
+ rm = rd->props.rm[rio.index + reg_base];
+ while (bytes > 0) {
+ size = ((bytes <= (rm->size - rio.offset)) ?
+ bytes : rm->size - rio.offset);
+ if ((rm->reg_type & RT_REG_TYPE_MASK) == RT_VOLATILE) {
+ ret = rt_chip_block_write(rd,
+ rm->addr + rio.offset,
+ size, &wdata[count]);
+ count += size;
+ } else {
+ blk_index = (rd->props.rt_regmap_mode &
+ RT_IO_BLK_MODE_MASK) >> 3;
+ ret = rd->rt_block_write[blk_index]
+ (rd, rm, size, &rio, wdata, &count);
+ }
+ if (ret < 0) {
+ dev_err(&rd->dev, "rd->rt_block_write fail\n");
+ goto ERR;
+ }
+
+ if ((rm->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE) {
+ *(rd->cache_flag + rio.index + reg_base) = 1;
+ rd->pending_event = 1;
+ }
+
+ bytes -= size;
+ if (bytes <= 0)
+ goto finished;
+ reg_base++;
+ rm = rd->props.rm[rio.index + reg_base];
+ rio.offset = 0;
+ if ((rio.index + reg_base) >= rd->props.register_num) {
+ dev_err(&rd->dev, "over regmap size\n");
+ goto ERR;
+ }
+ }
+finished:
+ if (rd->props.io_log_en) {
+ j = 0;
+ for (i = 0; i < count; i++)
+ j += sprintf(wri_data + j, "%02x,", wdata[i]);
+ pr_info("RT_REGMAP [WRITE] reg0x%04x [Data] 0x%s\n",
+ reg, wri_data);
+ }
+
+ schedule_delayed_work(&rd->rt_work, msecs_to_jiffies(1));
+ return 0;
+ERR:
+ return -EIO;
+}
+
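+/* Per-block-mode write handlers, gathered in rt_block_map[] below and
+ * dispatched through rd->rt_block_write[] using the RT_IO_BLK mode bits:
+ * rt_block_write updates the cache and (in write-through mode) the chip,
+ * _blk_all discards the data, _blk_chip updates only the cache, and
+ * _blk_cache writes only to the chip.
+ */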
+static int rt_block_write_blk_all(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count)
+{
+ int cnt;
+
+ cnt = *count;
+ cnt += size;
+ *count = cnt;
+ return 0;
+}
+
+static int rt_block_write_blk_chip(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count)
+{
+ int i, cnt;
+
+ cnt = *count;
+ for (i = rio->offset; i < rio->offset + size; i++) {
+ if ((rm->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE)
+ rm->cache_data[i] =
+ wdata[cnt] & rm->wbit_mask[i];
+ cnt++;
+ }
+ *count = cnt;
+ return 0;
+}
+
+static int rt_block_write_blk_cache(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count)
+{
+ int ret, cnt;
+
+ cnt = *count;
+
+ ret = rt_chip_block_write(rd, rm->addr + rio->offset,
+ size, &wdata[cnt]);
+ if (ret < 0) {
+ dev_err(&rd->dev,
+ "rt block write fail at 0x%02x\n",
+ rm->addr + rio->offset);
+ return -EIO;
+ }
+ cnt += size;
+ *count = cnt;
+ return 0;
+}
+
+static int rt_block_write(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count)
+{
+ int i, ret, cnt, change = 0;
+
+ cnt = *count;
+
+ for (i = rio->offset; i < size + rio->offset; i++) {
+ if ((rm->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE) {
+ if (rm->reg_type & RT_WR_ONCE) {
+ if (rm->cache_data[i] !=
+ (wdata[cnt] & rm->wbit_mask[i]))
+ change++;
+ }
+ rm->cache_data[i] = wdata[cnt] & rm->wbit_mask[i];
+ }
+ cnt++;
+ }
+
+ if (!change && (rm->reg_type & RT_WR_ONCE))
+ goto finish;
+
+ if ((rd->props.rt_regmap_mode & RT_CACHE_MODE_MASK) ==
+ RT_CACHE_WR_THROUGH) {
+ ret = rt_chip_block_write(rd,
+ rm->addr + rio->offset,
+ size, rm->cache_data);
+ if (ret < 0) {
+ dev_err(&rd->dev,
+ "rt block write fail at 0x%02x\n",
+ rm->addr + rio->offset);
+ return -EIO;
+ }
+ }
+
+finish:
+ *count = cnt;
+ return 0;
+}
+
+static int (*rt_block_map[])(struct rt_regmap_device *rd,
+ struct rt_register *rm, int size,
+ const struct reg_index_offset *rio,
+ unsigned char *wdata, int *count) = {
+ &rt_block_write,
+ &rt_block_write_blk_all,
+ &rt_block_write_blk_cache,
+ &rt_block_write_blk_chip,
+};
+
+static int rt_cache_block_read(struct rt_regmap_device *rd, u32 reg,
+ int bytes, void *dest)
+{
+ int i, ret, count = 0, reg_base = 0, total_bytes = 0;
+ struct reg_index_offset rio;
+ rt_register_map_t rm;
+ unsigned char data[100];
+ unsigned char tmp_data[32];
+
+ rio = find_register_index(rd, reg);
+ if (rio.index < 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of range\n", reg);
+ return -EINVAL;
+ }
+
+ rm = rd->props.rm[rio.index];
+
+ total_bytes += (rm->size - rio.offset);
+
+ for (i = rio.index + 1; i < rd->props.register_num; i++)
+ total_bytes += rd->props.rm[i]->size;
+
+ if (bytes > total_bytes) {
+ dev_err(&rd->dev, "out of cache map range\n");
+ return -EINVAL;
+ }
+
+ memcpy(data, &rm->cache_data[rio.offset], bytes);
+
+ if ((rm->reg_type & RT_REG_TYPE_MASK) == RT_VOLATILE) {
+ ret = rd->rops->read_device(rd->client,
+ rm->addr, rm->size, tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev,
+ "rt_regmap Error at 0x%02x\n",
+ rm->addr);
+ return -EIO;
+ }
+ for (i = rio.offset; i < rm->size; i++) {
+ data[count] = tmp_data[i];
+ count++;
+ }
+ } else {
+ count += (rm->size - rio.offset);
+ }
+
+ while (count < bytes) {
+ reg_base++;
+ rm = rd->props.rm[rio.index + reg_base];
+ if ((rm->reg_type & RT_REG_TYPE_MASK) == RT_VOLATILE) {
+ ret = rd->rops->read_device(rd->client,
+ rm->addr, rm->size, &data[count]);
+ if (ret < 0) {
+ dev_err(&rd->dev,
+ "rt_regmap Error at 0x%02x\n",
+ rm->addr);
+ return -EIO;
+ }
+ }
+ count += rm->size;
+ }
+
+ if (rd->props.io_log_en)
+ pr_info("RT_REGMAP [READ] reg0x%04x\n", reg);
+
+ memcpy(dest, data, bytes);
+
+ return 0;
+}
+
+/* rt_regmap_cache_backup - mark all cached register values for write-back to the chip */
+void rt_regmap_cache_backup(struct rt_regmap_device *rd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ int i;
+
+ down(&rd->semaphore);
+ for (i = 0; i < rd->props.register_num; i++)
+ if ((rm[i]->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE)
+ *(rd->cache_flag + i) = 1;
+ rd->pending_event = 1;
+ up(&rd->semaphore);
+}
+EXPORT_SYMBOL(rt_regmap_cache_backup);
+
+/* _rt_regmap_reg_write - write data to a specific register map entry
+ * only supports 1- to 4-byte register maps
+ * @rd: rt_regmap_device pointer.
+ * @rrd: rt_reg_data pointer.
+ */
+int _rt_regmap_reg_write(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ struct reg_index_offset rio;
+ int ret, tmp_data;
+
+ rio = find_register_index(rd, rrd->reg);
+ if (rio.index < 0 || rio.offset != 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of regmap\n", rrd->reg);
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ switch (rm[rio.index]->size) {
+ case 1:
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, 1, &rrd->rt_data.data_u8);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ up(&rd->semaphore);
+ return -EIO;
+ }
+ break;
+ case 2:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be16_to_cpu(rrd->rt_data.data_u32);
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ up(&rd->semaphore);
+ return -EIO;
+ }
+ break;
+ case 3:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN) {
+ tmp_data = be32_to_cpu(rrd->rt_data.data_u32);
+ tmp_data >>= 8;
+ }
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ up(&rd->semaphore);
+ return -EIO;
+ }
+ break;
+ case 4:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be32_to_cpu(rrd->rt_data.data_u32);
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ up(&rd->semaphore);
+ return -EIO;
+ }
+ break;
+ default:
+ dev_err(&rd->dev,
+ "Failed: only support 1~4 bytes regmap write\n");
+ break;
+ }
+ up(&rd->semaphore);
+ return 0;
+}
+EXPORT_SYMBOL(_rt_regmap_reg_write);
+
+/* _rt_asyn_regmap_reg_write - asynchronously write data to a specific register map entry */
+int _rt_asyn_regmap_reg_write(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ struct reg_index_offset rio;
+ int ret, tmp_data;
+
+ rio = find_register_index(rd, rrd->reg);
+ if (rio.index < 0 || rio.offset != 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of regmap\n", rrd->reg);
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ switch (rm[rio.index]->size) {
+ case 1:
+ ret = rt_asyn_cache_block_write(rd,
+ rrd->reg, 1,
+ &rrd->rt_data.data_u8);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ ret = -EIO;
+ goto err_regmap_write;
+ }
+ break;
+ case 2:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be16_to_cpu(rrd->rt_data.data_u32);
+ ret = rt_asyn_cache_block_write(rd,
+ rrd->reg,
+ rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ ret = -EIO;
+ goto err_regmap_write;
+ }
+ break;
+ case 3:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN) {
+ tmp_data = be32_to_cpu(rrd->rt_data.data_u32);
+ tmp_data >>= 8;
+ }
+ ret = rt_asyn_cache_block_write(rd,
+ rrd->reg,
+ rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ ret = -EIO;
+ goto err_regmap_write;
+ }
+ break;
+ case 4:
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be32_to_cpu(rrd->rt_data.data_u32);
+ ret = rt_asyn_cache_block_write(rd,
+ rrd->reg,
+ rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ ret = -EIO;
+ goto err_regmap_write;
+ }
+ break;
+ default:
+ dev_err(&rd->dev,
+ "Failed: only support 1~4 bytes regmap write\n");
+ break;
+ }
+ up(&rd->semaphore);
+ return 0;
+err_regmap_write:
+ up(&rd->semaphore);
+ return ret;
+}
+EXPORT_SYMBOL(_rt_asyn_regmap_reg_write);
+
+/* _rt_regmap_update_bits - update masked bits of a specific register map entry */
+int _rt_regmap_update_bits(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ struct reg_index_offset rio;
+ int ret, new, old;
+ bool change = false;
+
+ rio = find_register_index(rd, rrd->reg);
+ if (rio.index < 0 || rio.offset != 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of regmap\n", rrd->reg);
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ switch (rm[rio.index]->size) {
+ case 1:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, 1, &old);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_update_bits;
+ }
+ new = (old & ~(rrd->mask)) | (rrd->rt_data.data_u8 & rrd->mask);
+ change = old != new;
+
+ if (((rm[rio.index]->reg_type & RT_WR_ONCE) && change) ||
+ !(rm[rio.index]->reg_type & RT_WR_ONCE)) {
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, 1, &new);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ goto err_update_bits;
+ }
+ }
+ break;
+ case 2:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &old);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_update_bits;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ old = be16_to_cpu(old);
+
+ new = (old & ~(rrd->mask)) |
+ (rrd->rt_data.data_u16 & rrd->mask);
+
+ change = old != new;
+ if (((rm[rio.index]->reg_type & RT_WR_ONCE) && change) ||
+ !(rm[rio.index]->reg_type & RT_WR_ONCE)) {
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ new = be16_to_cpu(new);
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &new);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ goto err_update_bits;
+ }
+ }
+ break;
+ case 3:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &old);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_update_bits;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN) {
+ old = be32_to_cpu(old);
+ old >>= 8;
+ }
+
+ new = (old & ~(rrd->mask)) |
+ (rrd->rt_data.data_u32 & rrd->mask);
+ change = old != new;
+ if (((rm[rio.index]->reg_type & RT_WR_ONCE) && change) ||
+ !(rm[rio.index]->reg_type & RT_WR_ONCE)) {
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN) {
+ new <<= 8;
+ new = be32_to_cpu(new);
+ }
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &new);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ goto err_update_bits;
+ }
+ }
+ break;
+ case 4:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &old);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_update_bits;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ old = be32_to_cpu(old);
+
+ new = (old & ~(rrd->mask)) |
+ (rrd->rt_data.data_u32 & rrd->mask);
+ change = old != new;
+ if (((rm[rio.index]->reg_type & RT_WR_ONCE) && change) ||
+ !(rm[rio.index]->reg_type & RT_WR_ONCE)) {
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ new = be32_to_cpu(new);
+ ret = rd->regmap_ops.regmap_block_write(rd,
+ rrd->reg, rm[rio.index]->size, &new);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block write fail\n");
+ goto err_update_bits;
+ }
+ }
+ break;
+ default:
+ dev_err(&rd->dev,
+ "Failed: only support 1~4 bytes regmap write\n");
+ break;
+ }
+ up(&rd->semaphore);
+ return change;
+err_update_bits:
+ up(&rd->semaphore);
+ return ret;
+}
+EXPORT_SYMBOL(_rt_regmap_update_bits);
+
+/* rt_regmap_block_write - block write data to register
+ * @rd: rt_regmap_device pointer
+ * @reg: register address
+ * @bytes: length to write
+ * @src: source data
+ */
+int rt_regmap_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *src)
+{
+ int ret;
+
+ down(&rd->semaphore);
+ ret = rd->regmap_ops.regmap_block_write(rd, reg, bytes, src);
+ up(&rd->semaphore);
+ return ret;
+};
+EXPORT_SYMBOL(rt_regmap_block_write);
+
+/* rt_asyn_regmap_block_write - asynchronous block write */
+int rt_asyn_regmap_block_write(struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *src)
+{
+ int ret;
+
+ down(&rd->semaphore);
+ ret = rt_asyn_cache_block_write(rd, reg, bytes, src);
+ up(&rd->semaphore);
+ return ret;
+};
+EXPORT_SYMBOL(rt_asyn_regmap_block_write);
+
+/* rt_regmap_block_read - block read data from register
+ * @rd: rt_regmap_device pointer
+ * @reg: register address
+ * @bytes: read length
+ * @dst: destination for read data
+ */
+int rt_regmap_block_read(struct rt_regmap_device *rd, u32 reg,
+ int bytes, void *dst)
+{
+ int ret;
+
+ down(&rd->semaphore);
+ ret = rd->regmap_ops.regmap_block_read(rd, reg, bytes, dst);
+ up(&rd->semaphore);
+ return ret;
+};
+EXPORT_SYMBOL(rt_regmap_block_read);
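+
+/* Minimal usage sketch for the block API (the device pointer and register
+ * address below are hypothetical, for illustration only):
+ *
+ *     u8 buf[2] = { 0x12, 0x34 };
+ *
+ *     rt_regmap_block_write(chip->m_dev, 0x00, 2, buf);
+ *     rt_regmap_block_read(chip->m_dev, 0x00, 2, buf);
+ */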
+
+/* _rt_regmap_reg_read - read a specific register map entry
+ * only supports 1- to 4-byte register maps.
+ * @rd: rt_regmap_device pointer.
+ * @rrd: rt_reg_data pointer.
+ */
+int _rt_regmap_reg_read(struct rt_regmap_device *rd, struct rt_reg_data *rrd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ struct reg_index_offset rio;
+ int ret, tmp_data = 0;
+
+ rio = find_register_index(rd, rrd->reg);
+ if (rio.index < 0 || rio.offset != 0) {
+ dev_err(&rd->dev, "reg 0x%02x is out of regmap\n", rrd->reg);
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ switch (rm[rio.index]->size) {
+ case 1:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, 1, &rrd->rt_data.data_u8);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_regmap_reg_read;
+ }
+ break;
+ case 2:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_regmap_reg_read;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be16_to_cpu(tmp_data);
+ rrd->rt_data.data_u16 = tmp_data;
+ break;
+ case 3:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_regmap_reg_read;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be32_to_cpu(tmp_data);
+ rrd->rt_data.data_u32 = (tmp_data >> 8);
+ break;
+ case 4:
+ ret = rd->regmap_ops.regmap_block_read(rd,
+ rrd->reg, rm[rio.index]->size, &tmp_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "rt regmap block read fail\n");
+ goto err_regmap_reg_read;
+ }
+ if (rd->props.rt_format == RT_LITTLE_ENDIAN)
+ tmp_data = be32_to_cpu(tmp_data);
+ rrd->rt_data.data_u32 = tmp_data;
+ break;
+ default:
+ dev_err(&rd->dev,
+ "Failed: only support 1~4 bytes regmap read\n");
+ break;
+ }
+ up(&rd->semaphore);
+ return 0;
+err_regmap_reg_read:
+ up(&rd->semaphore);
+ return ret;
+}
+EXPORT_SYMBOL(_rt_regmap_reg_read);
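+
+/* Minimal usage sketch for the reg-level helpers above (register, mask and
+ * data values are hypothetical): callers fill a struct rt_reg_data first,
+ * e.g.
+ *
+ *     struct rt_reg_data rrd = { .reg = 0x10, .mask = 0x0f };
+ *
+ *     rrd.rt_data.data_u8 = 0x05;
+ *     _rt_regmap_update_bits(rd, &rrd);
+ *     _rt_regmap_reg_read(rd, &rrd);
+ */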
+
+void rt_cache_getlasterror(struct rt_regmap_device *rd, char *buf)
+{
+ down(&rd->semaphore);
+ sprintf(buf, "%s\n", rd->err_msg);
+ up(&rd->semaphore);
+}
+EXPORT_SYMBOL(rt_cache_getlasterror);
+
+void rt_cache_clrlasterror(struct rt_regmap_device *rd)
+{
+ down(&rd->semaphore);
+ rd->error_occurred = 0;
+ sprintf(rd->err_msg, "%s", "No Error");
+ up(&rd->semaphore);
+}
+EXPORT_SYMBOL(rt_cache_clrlasterror);
+
+/* initialize cache data from rt_register */
+int rt_regmap_cache_init(struct rt_regmap_device *rd)
+{
+ int i, j, ret, bytes_num = 0, count = 0;
+ const rt_register_map_t *rm = rd->props.rm;
+
+ dev_info(&rd->dev, "rt register cache data init\n");
+
+ down(&rd->semaphore);
+ rd->cache_flag = devm_kzalloc(&rd->dev,
+ rd->props.register_num * sizeof(int), GFP_KERNEL);
+
+ if (!rd->props.group) {
+ rd->props.group = devm_kzalloc(&rd->dev,
+ sizeof(*rd->props.group), GFP_KERNEL);
+ rd->props.group[0].start = 0x00;
+ rd->props.group[0].end = 0xffff;
+ rd->props.group[0].mode = RT_1BYTE_MODE;
+ }
+
+ /* calculate the maximum size shown on the regs debugfs node */
+ rd->part_size_limit = 0;
+ for (i = 0; i < rd->props.register_num; i++) {
+ if (!rm[i]->cache_data)
+ bytes_num += rm[i]->size;
+ if (rm[i]->size > rd->part_size_limit &&
+ (rm[i]->reg_type & RT_REG_TYPE_MASK) != RT_RESERVE)
+ rd->part_size_limit = rm[i]->size;
+ }
+ rd->part_size_limit = 400 / ((rd->part_size_limit - 1) * 3 + 5);
+
+ rd->alloc_data =
+ devm_kzalloc(&rd->dev,
+ bytes_num * sizeof(unsigned char), GFP_KERNEL);
+ if (!rd->alloc_data) {
+ pr_info("tmp data memory allocate fail\n");
+ goto mem_err;
+ }
+
+ /* reload cache data from real chip */
+ for (i = 0; i < rd->props.register_num; i++) {
+ if (!rm[i]->cache_data) {
+ rm[i]->cache_data = rd->alloc_data + count;
+ count += rm[i]->size;
+ if ((rm[i]->reg_type & RT_REG_TYPE_MASK) !=
+ RT_VOLATILE) {
+ ret = rd->rops->read_device(rd->client,
+ rm[i]->addr, rm[i]->size,
+ rm[i]->cache_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "chip read fail\n");
+ goto io_err;
+ }
+ } else {
+ memset(rm[i]->cache_data, 0x00, rm[i]->size);
+ }
+ }
+ *(rd->cache_flag + i) = 0;
+ }
+
+ /* set 0xff writable mask for NORMAL and RESERVE types */
+ for (i = 0; i < rd->props.register_num; i++) {
+ if ((rm[i]->reg_type & RT_REG_TYPE_MASK) == RT_NORMAL ||
+ (rm[i]->reg_type & RT_REG_TYPE_MASK) == RT_RESERVE) {
+ for (j = 0; j < rm[i]->size; j++)
+ rm[i]->wbit_mask[j] = 0xff;
+ }
+ }
+
+ rd->cache_inited = 1;
+ dev_info(&rd->dev, "cache cata init successfully\n");
+ up(&rd->semaphore);
+ return 0;
+mem_err:
+ up(&rd->semaphore);
+ return -ENOMEM;
+io_err:
+ up(&rd->semaphore);
+ return -EIO;
+}
+EXPORT_SYMBOL(rt_regmap_cache_init);
+
+/* rt_regmap_cache_reload - reload cache values from the chip */
+int rt_regmap_cache_reload(struct rt_regmap_device *rd)
+{
+ int i, ret;
+ const rt_register_map_t *rm = rd->props.rm;
+
+ down(&rd->semaphore);
+ for (i = 0; i < rd->props.register_num; i++) {
+ if ((rm[i]->reg_type & RT_REG_TYPE_MASK) != RT_VOLATILE) {
+ ret = rd->rops->read_device(rd->client, rm[i]->addr,
+ rm[i]->size, rm[i]->cache_data);
+ if (ret < 0) {
+ dev_err(&rd->dev, "i2c read fail\n");
+ goto io_err;
+ }
+ *(rd->cache_flag + i) = 0;
+ }
+ }
+ rd->pending_event = 0;
+ up(&rd->semaphore);
+ dev_info(&rd->dev, "cache data reload\n");
+ return 0;
+
+io_err:
+ up(&rd->semaphore);
+ return -EIO;
+}
+EXPORT_SYMBOL(rt_regmap_cache_reload);
+
+/* rt_regmap_add_debugfs - add a user-defined debugfs node
+ * @rd: rt_regmap_device pointer.
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @data: a pointer to something that the caller will want to get to later on.
+ * The inode.i_private pointer will point to this value on the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ * this file.
+ */
+int rt_regmap_add_debugfs(struct rt_regmap_device *rd, const char *name,
+ umode_t mode, void *data,
+ const struct file_operations *fops)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *den;
+
+ den = debugfs_create_file(name, mode, rd->rt_den, data, fops);
+ if (!den)
+ return -EINVAL;
+#endif /*CONFIG_DEBUG_FS*/
+ return 0;
+}
+EXPORT_SYMBOL(rt_regmap_add_debugfs);
+
+/* release cache data */
+static void rt_regmap_cache_release(struct rt_regmap_device *rd)
+{
+ int i;
+ const rt_register_map_t *rm = rd->props.rm;
+
+ dev_info(&rd->dev, "cache data release\n");
+ for (i = 0; i < rd->props.register_num; i++)
+ rm[i]->cache_data = NULL;
+ devm_kfree(&rd->dev, rd->alloc_data);
+ if (rd->cache_flag)
+ devm_kfree(&rd->dev, rd->cache_flag);
+ rd->cache_inited = 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
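+/* rt_check_dump_config_file - if /sdcard/<name>_dump_config.txt exists, parse
+ * its space-separated hex values into @reg_dump, report how many were read
+ * through @cnt, and copy the leading token into @type.
+ */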
+static void rt_check_dump_config_file(struct rt_regmap_device *rd,
+ long int *reg_dump, int *cnt, char *type)
+{
+ char *token, *buf, *tmp_type;
+ char PATH[64];
+ mm_segment_t fs;
+ struct file *fp;
+ int ret, tmp_cnt = 0;
+
+ buf = devm_kzalloc(&rd->dev, 64 * sizeof(char), GFP_KERNEL);
+ sprintf(PATH, "/sdcard/%s_dump_config.txt", rd->props.name);
+ fp = filp_open(PATH, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ pr_info("There is no Dump config file in sdcard\n");
+ devm_kfree(&rd->dev, buf);
+ } else {
+ fs = get_fs();
+ set_fs(get_ds());
+ fp->f_op->read(fp, buf, 64, &fp->f_pos);
+ set_fs(fs);
+
+ token = strsep(&buf, " ");
+ tmp_type = token;
+ while (token) {
+ ret = kstrtoul(token, 16, &reg_dump[tmp_cnt]);
+ if (ret == 0)
+ tmp_cnt++;
+ token = strsep(&buf, " ");
+ }
+ filp_close(fp, NULL);
+ *cnt = tmp_cnt;
+ memcpy(type, tmp_type, 16);
+ devm_kfree(&rd->dev, buf);
+ }
+}
+
+static void rt_show_regs(struct rt_regmap_device *rd, struct seq_file *seq_file)
+{
+ int i = 0, k = 0, ret, count = 0, cnt = 0;
+ unsigned char regval[512];
+ long int reg_dump[64] = {0};
+ const rt_register_map_t *rm = rd->props.rm;
+ char type[16];
+
+ rt_check_dump_config_file(rd, reg_dump, &cnt, type);
+ down(&rd->semaphore);
+ for (i = 0; i < rd->props.register_num; i++) {
+ ret = rd->regmap_ops.regmap_block_read(rd, rm[i]->addr,
+ rm[i]->size, &regval[count]);
+ count += rm[i]->size;
+ if (ret < 0) {
+ dev_err(&rd->dev, "regmap block read fail\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg + strlen(rd->err_msg),
+ "Error block read fail at 0x%02x\n",
+ rm[i]->addr);
+ } else {
+ sprintf(rd->err_msg,
+ "Error block read fail at 0x%02x\n",
+ rm[i]->addr);
+ rd->error_occurred = 1;
+ }
+ goto err_show_regs;
+ }
+
+ if ((rm[i]->reg_type & RT_REG_TYPE_MASK) != RT_RESERVE) {
+ seq_printf(seq_file, "reg0x%02x:0x", rm[i]->addr);
+ for (k = 0; k < rm[i]->size; k++)
+ seq_printf(seq_file, "%02x,",
+ regval[count - rm[i]->size + k]);
+ seq_puts(seq_file, "\n");
+ } else {
+ seq_printf(seq_file,
+ "reg0x%02x:reserve\n", rm[i]->addr);
+ }
+ }
+err_show_regs:
+ up(&rd->semaphore);
+}
+
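+/* seq_file show handler backing the general debugfs nodes; dispatches on the
+ * RT_DBG_* id attached to each node.
+ */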
+static int general_read(struct seq_file *seq_file, void *_data)
+{
+ struct rt_debug_st *st = (struct rt_debug_st *)seq_file->private;
+ struct rt_regmap_device *rd = st->info;
+ rt_register_map_t rm;
+ char lbuf[900];
+ unsigned char reg_data[24] = { 0 };
+ unsigned char data;
+ int i = 0, rc = 0, size = 0;
+
+ lbuf[0] = '\0';
+ switch (st->id) {
+ case RT_DBG_REG:
+ seq_printf(seq_file, "0x%04x\n", rd->dbg_data.reg_addr);
+ break;
+ case RT_DBG_DATA:
+ if (rd->dbg_data.reg_size == 0)
+ rd->dbg_data.reg_size = 1;
+
+ size = rd->dbg_data.reg_size;
+
+ if (rd->dbg_data.rio.index == -1) {
+ down(&rd->semaphore);
+ rc = rt_chip_block_read(rd, rd->dbg_data.reg_addr,
+ size, reg_data);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ seq_puts(seq_file, "invalid read\n");
+ break;
+ }
+ goto hiden_read;
+ }
+
+ rm = rd->props.rm[rd->dbg_data.rio.index];
+
+ down(&rd->semaphore);
+ rc = rd->regmap_ops.regmap_block_read(rd,
+ rd->dbg_data.reg_addr, size, reg_data);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ seq_puts(seq_file, "invalid read\n");
+ break;
+ }
+
+hiden_read:
+ seq_puts(seq_file, "0x");
+ for (i = 0; i < size; i++)
+ seq_printf(seq_file, "%02x,", reg_data[i]);
+ seq_puts(seq_file, "\n");
+ break;
+ case RT_DBG_ERROR:
+ seq_puts(seq_file, "======== Error Message ========\n");
+ if (!rd->error_occurred)
+ seq_puts(seq_file, "No Error\n");
+ else
+ seq_printf(seq_file, "%s", rd->err_msg);
+ break;
+ case RT_DBG_REGS:
+ rt_show_regs(rd, seq_file);
+ break;
+ case RT_DBG_NAME:
+ seq_printf(seq_file, "%s\n", rd->props.aliases);
+ break;
+ case RT_DBG_SIZE:
+ seq_printf(seq_file, "%d\n", rd->dbg_data.reg_size);
+ break;
+ case RT_DBG_BLOCK:
+ data = rd->props.rt_regmap_mode & RT_IO_BLK_MODE_MASK;
+ if (data == RT_IO_PASS_THROUGH)
+ seq_puts(seq_file, "0 => IO_PASS_THROUGH\n");
+ else if (data == RT_IO_BLK_ALL)
+ seq_puts(seq_file, "1 => IO_BLK_ALL\n");
+ else if (data == RT_IO_BLK_CACHE)
+ seq_puts(seq_file, "2 => IO_BLK_CACHE\n");
+ else if (data == RT_IO_BLK_CHIP)
+ seq_puts(seq_file, "3 => IO_BLK_CHIP\n");
+ break;
+ case RT_DBG_SLAVE_ADDR:
+ {
+ struct i2c_client *i2c = rd->client;
+
+ seq_printf(seq_file, "0x%02x\n", i2c->addr);
+ }
+ break;
+ case RT_SUPPORT_MODE:
+ seq_puts(seq_file, " == BLOCK MODE ==\n");
+ seq_puts(seq_file, "0 => IO_PASS_THROUGH\n");
+ seq_puts(seq_file, "1 => IO_BLK_ALL\n");
+ seq_puts(seq_file, "2 => IO_BLK_CHIP\n");
+ seq_puts(seq_file, "3 => IO_BLK_CACHE\n");
+ seq_puts(seq_file, " == CACHE MODE ==\n");
+ seq_puts(seq_file, "0 => CACHE_WR_THROUGH\n");
+ seq_puts(seq_file, "1 => CACHE_WR_BACK\n");
+ seq_puts(seq_file, "2 => CACHE_DISABLE\n");
+
+ break;
+ case RT_DBG_IO_LOG:
+ seq_printf(seq_file, "%d\n", rd->props.io_log_en);
+ break;
+ case RT_DBG_CACHE_MODE:
+ data = rd->props.rt_regmap_mode & RT_CACHE_MODE_MASK;
+ if (data == RT_CACHE_WR_THROUGH)
+ seq_printf(seq_file, "%s",
+ "0 => Cache Write Through\n");
+ else if (data == RT_CACHE_WR_BACK)
+ seq_printf(seq_file, "%s", "1 => Cache Write Back\n");
+ else if (data == RT_CACHE_DISABLE)
+ seq_printf(seq_file, "%s", "2 => Cache Disable\n");
+ break;
+ case RT_DBG_REG_SIZE:
+ size = rt_get_regsize(rd, rd->dbg_data.reg_addr);
+ if (size < 0)
+ seq_printf(seq_file, "%d\n", 0);
+ else
+ seq_printf(seq_file, "%d\n", size);
+ break;
+ }
+ return 0;
+}
+
+static int general_open(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ return single_open(file, general_read, inode->i_private);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
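+/* write handler for the general debugfs nodes: parses the user input
+ * according to the node's RT_DBG_* id and either updates the debug state or
+ * issues the corresponding register access.
+ */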
+static ssize_t general_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rt_debug_st *st = file->private_data;
+ struct rt_regmap_device *rd = st->info;
+ struct reg_index_offset rio;
+ long int param[5];
+ unsigned char reg_data[24] = { 0 };
+ int rc, size = 0;
+ char lbuf[128];
+
+ if (count > sizeof(lbuf) - 1)
+ return -EFAULT;
+
+ rc = copy_from_user(lbuf, ubuf, count);
+ if (rc)
+ return -EFAULT;
+
+ lbuf[count] = '\0';
+
+ switch (st->id) {
+ case RT_DBG_REG:
+ rc = get_parameters(lbuf, param, 1);
+ rio = find_register_index(rd, param[0]);
+ down(&rd->semaphore);
+ if (rio.index < 0) {
+ pr_info("this is an invalid or hiden register\n");
+ rd->dbg_data.reg_addr = param[0];
+ rd->dbg_data.rio.index = -1;
+ } else {
+ rd->dbg_data.rio = rio;
+ rd->dbg_data.reg_addr = param[0];
+ }
+ up(&rd->semaphore);
+ break;
+ case RT_DBG_DATA:
+ if (rd->dbg_data.reg_size == 0)
+ rd->dbg_data.reg_size = 1;
+
+ if (rd->dbg_data.rio.index == -1) {
+ size = rd->dbg_data.reg_size;
+ if ((size - 1) * 3 + 5 != count) {
+ dev_err(&rd->dev, "wrong input length\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg +
+ strlen(rd->err_msg),
+ "Error, wrong input length\n");
+ } else {
+ sprintf(rd->err_msg,
+ "Error, wrong input length\n");
+ rd->error_occurred = 1;
+ }
+ return -EINVAL;
+ }
+
+ rc = get_datas((char *)ubuf, count, reg_data, size);
+ if (rc < 0) {
+ dev_err(&rd->dev, "get datas fail\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg +
+ strlen(rd->err_msg),
+ "Error, get datas fail\n");
+ } else {
+ sprintf(rd->err_msg,
+ "Error, get datas fail\n");
+ rd->error_occurred = 1;
+ }
+ return -EINVAL;
+ }
+ down(&rd->semaphore);
+ rc = rt_chip_block_write(rd, rd->dbg_data.reg_addr,
+ size, reg_data);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ dev_err(&rd->dev, "chip block write fail\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg +
+ strlen(rd->err_msg),
+ "Error chip block write fail at 0x%02x\n",
+ rd->dbg_data.reg_addr);
+ } else {
+ sprintf(rd->err_msg,
+ "Error chip block write fail at 0x%02x\n",
+ rd->dbg_data.reg_addr);
+ rd->error_occurred = 1;
+ }
+ return -EIO;
+ }
+ break;
+ }
+
+ size = rd->dbg_data.reg_size;
+
+ if ((size - 1) * 3 + 5 != count) {
+ dev_err(&rd->dev, "wrong input length\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg + strlen(rd->err_msg),
+ "Error, wrong input length\n");
+ } else {
+ sprintf(rd->err_msg,
+ "Error, wrong input length\n");
+ rd->error_occurred = 1;
+ }
+ return -EINVAL;
+ }
+
+ rc = get_datas((char *)ubuf, count, reg_data, size);
+ if (rc < 0) {
+ dev_err(&rd->dev, "get datas fail\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg + strlen(rd->err_msg),
+ "Error, get datas fail\n");
+ } else {
+ sprintf(rd->err_msg,
+ "Error, get datas fail\n");
+ rd->error_occurred = 1;
+ }
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ rc = rd->regmap_ops.regmap_block_write(rd,
+ rd->dbg_data.reg_addr, size, reg_data);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ dev_err(&rd->dev, "regmap block write fail\n");
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg + strlen(rd->err_msg),
+ "Error regmap block write fail at 0x%02x\n",
+ rd->dbg_data.reg_addr);
+ } else {
+ sprintf(rd->err_msg,
+ "Error regmap block write fail at 0x%02x\n",
+ rd->dbg_data.reg_addr);
+ rd->error_occurred = 1;
+ }
+ return -EIO;
+ }
+
+ break;
+ case RT_DBG_SYNC:
+ rc = get_parameters(lbuf, param, 1);
+ if (param[0])
+ rt_regmap_cache_sync(rd);
+ break;
+ case RT_DBG_ERROR:
+ rc = get_parameters(lbuf, param, 1);
+ if (param[0])
+ rt_cache_clrlasterror(rd);
+ break;
+ case RT_DBG_SIZE:
+ rc = get_parameters(lbuf, param, 1);
+ if (param[0] >= 0) {
+ down(&rd->semaphore);
+ rd->dbg_data.reg_size = param[0];
+ up(&rd->semaphore);
+ } else {
+ if (rd->error_occurred) {
+ sprintf(rd->err_msg + strlen(rd->err_msg),
+ "Error, size must > 0\n");
+ } else {
+ sprintf(rd->err_msg,
+ "Error, size must > 0\n");
+ rd->error_occurred = 1;
+ }
+ return -EINVAL;
+ }
+ break;
+ case RT_DBG_BLOCK:
+ rc = get_parameters(lbuf, param, 1);
+ if (param[0] < 0)
+ param[0] = 0;
+ else if (param[0] > 3)
+ param[0] = 3;
+
+ param[0] <<= 3;
+
+ down(&rd->semaphore);
+ rd->props.rt_regmap_mode &= ~RT_IO_BLK_MODE_MASK;
+ rd->props.rt_regmap_mode |= param[0];
+ up(&rd->semaphore);
+ if (param[0] == RT_IO_PASS_THROUGH)
+ rt_regmap_cache_sync(rd);
+ break;
+ case RT_DBG_IO_LOG:
+ rc = get_parameters(lbuf, param, 1);
+ down(&rd->semaphore);
+ if (!param[0])
+ rd->props.io_log_en = 0;
+ else
+ rd->props.io_log_en = 1;
+ up(&rd->semaphore);
+ break;
+ case RT_DBG_CACHE_MODE:
+ rc = get_parameters(lbuf, param, 1);
+ if (param[0] < 0)
+ param[0] = 0;
+ else if (param[0] > 2)
+ param[0] = 2;
+ param[0] <<= 1;
+
+ if (param[0] == RT_CACHE_WR_THROUGH) {
+ rt_regmap_cache_reload(rd);
+ rd->regmap_ops.regmap_block_write =
+ rt_cache_block_write;
+ rd->regmap_ops.regmap_block_read = &rt_cache_block_read;
+ } else if (param[0] == RT_CACHE_WR_BACK) {
+ rt_regmap_cache_reload(rd);
+ rd->regmap_ops.regmap_block_write =
+ rt_asyn_cache_block_write;
+ rd->regmap_ops.regmap_block_read = &rt_cache_block_read;
+ } else if (param[0] == RT_CACHE_DISABLE) {
+ rd->regmap_ops.regmap_block_write =
+ rt_chip_block_write;
+ rd->regmap_ops.regmap_block_read = rt_chip_block_read;
+ }
+
+ rd->props.rt_regmap_mode &= ~RT_CACHE_MODE_MASK;
+ rd->props.rt_regmap_mode |= param[0];
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static int general_release(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ return single_release(inode, file);
+ return 0;
+}
+
+static const struct file_operations general_ops = {
+ .owner = THIS_MODULE,
+ .open = general_open,
+ .write = general_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = general_release,
+};
+
+/* create general debugfs node */
+static void rt_create_general_debug(struct rt_regmap_device *rd,
+ struct dentry *dir)
+{
+ rd->rtdbg_st[0].info = rd;
+ rd->rtdbg_st[0].id = RT_DBG_REG;
+ rd->rt_debug_file[0] = debugfs_create_file("reg_addr",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[0],
+ &general_ops);
+ rd->rtdbg_st[1].info = rd;
+ rd->rtdbg_st[1].id = RT_DBG_DATA;
+ rd->rt_debug_file[1] = debugfs_create_file("data",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[1],
+ &general_ops);
+
+ rd->rtdbg_st[2].info = rd;
+ rd->rtdbg_st[2].id = RT_DBG_REGS;
+ rd->rt_debug_file[2] = debugfs_create_file("regs",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[2],
+ &general_ops);
+
+ rd->rtdbg_st[3].info = rd;
+ rd->rtdbg_st[3].id = RT_DBG_SYNC;
+ rd->rt_debug_file[3] = debugfs_create_file("sync",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[3],
+ &general_ops);
+
+ rd->rtdbg_st[4].info = rd;
+ rd->rtdbg_st[4].id = RT_DBG_ERROR;
+ rd->rt_debug_file[4] = debugfs_create_file("Error",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[4],
+ &general_ops);
+
+ rd->rtdbg_st[5].info = rd;
+ rd->rtdbg_st[5].id = RT_DBG_NAME;
+ rd->rt_debug_file[5] = debugfs_create_file("name",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[5],
+ &general_ops);
+
+ rd->rtdbg_st[6].info = rd;
+ rd->rtdbg_st[6].id = RT_DBG_BLOCK;
+ rd->rt_debug_file[6] = debugfs_create_file("block",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[6],
+ &general_ops);
+
+ rd->rtdbg_st[7].info = rd;
+ rd->rtdbg_st[7].id = RT_DBG_SIZE;
+ rd->rt_debug_file[7] = debugfs_create_file("size",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[7],
+ &general_ops);
+
+ rd->rtdbg_st[8].info = rd;
+ rd->rtdbg_st[8].id = RT_DBG_SLAVE_ADDR;
+ rd->rt_debug_file[8] = debugfs_create_file("slave_addr",
+ S_IFREG | 0444, dir,
+ (void *)
+ &rd->rtdbg_st[8],
+ &general_ops);
+
+ rd->rtdbg_st[9].info = rd;
+ rd->rtdbg_st[9].id = RT_SUPPORT_MODE;
+ rd->rt_debug_file[9] = debugfs_create_file("support_mode",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[9],
+ &general_ops);
+
+ rd->rtdbg_st[10].info = rd;
+ rd->rtdbg_st[10].id = RT_DBG_IO_LOG;
+ rd->rt_debug_file[10] = debugfs_create_file("io_log",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[10],
+ &general_ops);
+
+ rd->rtdbg_st[11].info = rd;
+ rd->rtdbg_st[11].id = RT_DBG_CACHE_MODE;
+ rd->rt_debug_file[11] = debugfs_create_file("cache_mode",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[11],
+ &general_ops);
+ rd->rtdbg_st[12].info = rd;
+ rd->rtdbg_st[12].id = RT_DBG_REG_SIZE;
+ rd->rt_debug_file[12] = debugfs_create_file("reg_size",
+ S_IFREG | 0444, dir,
+ (void *)&rd->rtdbg_st[12],
+ &general_ops);
+}
+
+static int eachreg_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t eachreg_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rt_debug_st *st = file->private_data;
+ struct rt_regmap_device *rd = st->info;
+ rt_register_map_t rm = rd->props.rm[st->id];
+ int rc;
+ unsigned char pars[20];
+
+ if ((rm->size - 1) * 3 + 5 != count) {
+ dev_err(&rd->dev, "wrong input length\n");
+ return -EINVAL;
+ }
+ rc = get_datas((char *)ubuf, count, pars, rm->size);
+ if (rc < 0) {
+ dev_err(&rd->dev, "get datas fail\n");
+ return -EINVAL;
+ }
+
+ down(&rd->semaphore);
+ rc = rd->regmap_ops.regmap_block_write(rd, rm->addr,
+ rm->size, &pars[0]);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ dev_err(&rd->dev, "regmap block read fail\n");
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t eachreg_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rt_debug_st *st = file->private_data;
+ struct rt_regmap_device *rd = st->info;
+ char lbuf[80];
+ unsigned char regval[32];
+ rt_register_map_t rm = rd->props.rm[st->id];
+ int i, j = 0, rc;
+
+ lbuf[0] = '\0';
+
+ down(&rd->semaphore);
+ rc = rd->regmap_ops.regmap_block_read(rd, rm->addr, rm->size, regval);
+ up(&rd->semaphore);
+ if (rc < 0) {
+ dev_err(&rd->dev, "regmap block read fail\n");
+ return -EIO;
+ }
+
+ j += sprintf(lbuf + j, "reg0x%02x:0x", rm->addr);
+ for (i = 0; i < rm->size; i++)
+ j += sprintf(lbuf + j, "%02x,", regval[i]);
+ j += sprintf(lbuf + j, "\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, lbuf, strlen(lbuf));
+}
+
+static const struct file_operations eachreg_ops = {
+ .open = eachreg_open,
+ .read = eachreg_read,
+ .write = eachreg_write,
+};
+
+/* create every register node at debugfs */
+static void rt_create_every_debug(struct rt_regmap_device *rd,
+ struct dentry *dir)
+{
+ int i;
+ char buf[10];
+
+ rd->rt_reg_file = devm_kzalloc(&rd->dev,
+ rd->props.register_num * sizeof(struct dentry *), GFP_KERNEL);
+ rd->reg_st = devm_kzalloc(&rd->dev,
+ rd->props.register_num * sizeof(struct rt_debug_st *),
+ GFP_KERNEL);
+ for (i = 0; i < rd->props.register_num; i++) {
+ sprintf(buf, "reg0x%02x", (rd->props.rm[i])->addr);
+ /* rt_reg_file[i] is filled in by debugfs_create_file() below */
+ rd->reg_st[i] = devm_kzalloc(&rd->dev,
+ sizeof(*rd->reg_st[i]),
+ GFP_KERNEL);
+
+ rd->reg_st[i]->info = rd;
+ rd->reg_st[i]->id = i;
+ rd->rt_reg_file[i] = debugfs_create_file(buf,
+ S_IFREG | 0444, dir,
+ (void *)rd->reg_st[i],
+ &eachreg_ops);
+ }
+}
+
+static void rt_release_every_debug(struct rt_regmap_device *rd)
+{
+ int num = rd->props.register_num;
+ int i;
+
+ /* the dentries in rt_reg_file[] are torn down by debugfs itself */
+ for (i = 0; i < num; i++)
+ devm_kfree(&rd->dev, rd->reg_st[i]);
+ devm_kfree(&rd->dev, rd->rt_reg_file);
+ devm_kfree(&rd->dev, rd->reg_st);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void rt_regmap_device_release(struct device *dev)
+{
+ struct rt_regmap_device *rd = to_rt_regmap_device(dev);
+
+ devm_kfree(dev, rd);
+}
+
+/* check the rt_register format is correct */
+static int rt_regmap_check(struct rt_regmap_device *rd)
+{
+ const rt_register_map_t *rm = rd->props.rm;
+ int num = rd->props.register_num;
+ int i;
+
+ /* check name property */
+ if (!rd->props.name) {
+ pr_info("there is no node name for rt-regmap\n");
+ return -EINVAL;
+ }
+
+ if (!(rd->props.rt_regmap_mode & RT_BYTE_MODE_MASK))
+ goto single_byte;
+
+ for (i = 0; i < num; i++) {
+ /* check byte size, 1 byte ~ 24 bytes is valid */
+ if (rm[i]->size < 1 || rm[i]->size > 24) {
+ pr_info("rt register size error at reg 0x%02x\n",
+ rm[i]->addr);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num - 1; i++) {
+ /* check register sequence */
+ if (rm[i]->addr >= rm[i + 1]->addr) {
+ pr_info("sequence format error at reg 0x%02x\n",
+ rm[i]->addr);
+ return -EINVAL;
+ }
+ }
+
+single_byte:
+ /* no default reg_addr and the register map's first addr is not 0x00 */
+ if (!rd->dbg_data.reg_addr && rm[0]->addr) {
+ rd->dbg_data.reg_addr = rm[0]->addr;
+ rd->dbg_data.rio.index = 0;
+ rd->dbg_data.rio.offset = 0;
+ }
+ return 0;
+}
+
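+/*
+ * Used when the byte-mode bit of rt_regmap_mode is clear: expand the
+ * caller-supplied register map into one entry per byte, so an N-byte
+ * register at address A becomes N single-byte entries at A..A+N-1
+ * that inherit the register type (and, for RT_WBITS registers, the
+ * first writable-bit mask).
+ */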
+static int rt_create_simple_map(struct rt_regmap_device *rd)
+{
+ int i, j, count = 0, num = 0;
+ rt_register_map_t *rm;
+
+ pr_info("%s\n", __func__);
+ for (i = 0; i < rd->props.register_num; i++)
+ num += rd->props.rm[i]->size;
+
+ rm = devm_kzalloc(&rd->dev, num * sizeof(*rm), GFP_KERNEL);
+
+ for (i = 0; i < rd->props.register_num; i++) {
+ for (j = 0; j < rd->props.rm[i]->size; j++) {
+ rm[count] = devm_kzalloc(&rd->dev,
+ sizeof(struct rt_register),
+ GFP_KERNEL);
+ rm[count]->wbit_mask = devm_kzalloc(&rd->dev,
+ sizeof(unsigned char), GFP_KERNEL);
+
+ rm[count]->addr = rd->props.rm[i]->addr + j;
+ rm[count]->size = 1;
+ rm[count]->reg_type = rd->props.rm[i]->reg_type;
+ if ((rd->props.rm[i]->reg_type & RT_REG_TYPE_MASK) !=
+ RT_WBITS)
+ rm[count]->wbit_mask[0] = 0xff;
+ else
+ rm[count]->wbit_mask[0] =
+ rd->props.rm[i]->wbit_mask[0];
+ count++;
+ }
+ if (count > num)
+ break;
+ }
+
+ rd->props.register_num = num;
+ rd->props.rm = rm;
+
+ return 0;
+}
+
+/* rt_regmap_device_register
+ * @props: a pointer to rt_regmap_properties for the rt_regmap_device
+ * @rops: a pointer to rt_regmap_fops for the rt_regmap_device
+ * @parent: a pointer to the parent device
+ * @client: a pointer to the slave client of this device
+ * @drvdata: a pointer to the driver data
+ */
+struct rt_regmap_device *rt_regmap_device_register
+ (struct rt_regmap_properties *props,
+ struct rt_regmap_fops *rops,
+ struct device *parent,
+ void *client, void *drvdata)
+{
+ struct rt_regmap_device *rd;
+ int ret = 0, i;
+ char device_name[32];
+ unsigned char data;
+
+ pr_info("regmap_device_register: name = %s\n", props->name);
+ rd = devm_kzalloc(parent, sizeof(*rd), GFP_KERNEL);
+ if (!rd) {
+ pr_info("rt_regmap_device memory allocate fail\n");
+ return NULL;
+ }
+
+ /* create a binary semaphore */
+ sema_init(&rd->semaphore, 1);
+ rd->dev.parent = parent;
+ rd->client = client;
+ rd->dev.release = rt_regmap_device_release;
+ dev_set_drvdata(&rd->dev, drvdata);
+ snprintf(device_name, sizeof(device_name), "rt_regmap_%s", props->name);
+ dev_set_name(&rd->dev, "%s", device_name);
+ memcpy(&rd->props, props, sizeof(struct rt_regmap_properties));
+
+ /* check rt_register_map format */
+ ret = rt_regmap_check(rd);
+ if (ret) {
+ pr_info("rt register map format error\n");
+ devm_kfree(parent, rd);
+ return NULL;
+ }
+
+ ret = device_register(&rd->dev);
+ if (ret) {
+ pr_info("rt-regmap dev register fail\n");
+ devm_kfree(parent, rd);
+ return NULL;
+ }
+
+ rd->rops = rops;
+ rd->err_msg = devm_kzalloc(parent, 128 * sizeof(char), GFP_KERNEL);
+
+ if (!(rd->props.rt_regmap_mode & RT_BYTE_MODE_MASK)) {
+ ret = rt_create_simple_map(rd);
+ if (ret < 0) {
+ pr_info(" rt create simple register map fail\n");
+ goto err_cacheinit;
+ }
+ }
+
+ /* init cache data */
+ ret = rt_regmap_cache_init(rd);
+ if (ret < 0) {
+ pr_info(" rt cache data init fail\n");
+ goto err_cacheinit;
+ }
+
+ INIT_DELAYED_WORK(&rd->rt_work, rt_work_func);
+
+ for (i = 0; i <= 3; i++)
+ rd->rt_block_write[i] = rt_block_map[i];
+
+ data = rd->props.rt_regmap_mode & RT_CACHE_MODE_MASK;
+ if (data == RT_CACHE_WR_THROUGH) {
+ rd->regmap_ops.regmap_block_write = &rt_cache_block_write;
+ rd->regmap_ops.regmap_block_read = &rt_cache_block_read;
+ } else if (data == RT_CACHE_WR_BACK) {
+ rd->regmap_ops.regmap_block_write = &rt_asyn_cache_block_write;
+ rd->regmap_ops.regmap_block_read = &rt_cache_block_read;
+ } else if (data == RT_CACHE_DISABLE) {
+ rd->regmap_ops.regmap_block_write = &rt_chip_block_write;
+ rd->regmap_ops.regmap_block_read = &rt_chip_block_read;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ rd->rt_den = debugfs_create_dir(props->name, rt_regmap_dir);
+ if (!IS_ERR(rd->rt_den)) {
+ rt_create_general_debug(rd, rd->rt_den);
+ if (rd->props.rt_regmap_mode & DBG_MODE_MASK)
+ rt_create_every_debug(rd, rd->rt_den);
+ } else {
+ goto err_debug;
+ }
+#endif /* CONFIG_DEBUG_FS */
+
+ return rd;
+
+#ifdef CONFIG_DEBUG_FS
+err_debug:
+ rt_regmap_cache_release(rd);
+#endif /* CONFIG_DEBUG_FS */
+err_cacheinit:
+ device_unregister(&rd->dev);
+ return NULL;
+}
+EXPORT_SYMBOL(rt_regmap_device_register);
+
+/* rt_regmap_device_unregister - unregister rt_regmap_device*/
+void rt_regmap_device_unregister(struct rt_regmap_device *rd)
+{
+ if (!rd)
+ return;
+ down(&rd->semaphore);
+ rd->rops = NULL;
+ up(&rd->semaphore);
+ if (rd->cache_inited)
+ rt_regmap_cache_release(rd);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(rd->rt_den);
+ if (rd->props.rt_regmap_mode & DBG_MODE_MASK)
+ rt_release_every_debug(rd);
+#endif /* CONFIG_DEBUG_FS */
+ device_unregister(&rd->dev);
+}
+EXPORT_SYMBOL(rt_regmap_device_unregister);
+
+static int __init regmap_plat_init(void)
+{
+ rt_regmap_dir = debugfs_create_dir("rt-regmap", NULL);
+ pr_info("Init Richtek RegMap\n");
+ if (IS_ERR(rt_regmap_dir)) {
+ pr_err("rt-regmap debugfs node create fail\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+subsys_initcall(regmap_plat_init);
+
+static void __exit regmap_plat_exit(void)
+{
+ debugfs_remove(rt_regmap_dir);
+}
+
+module_exit(regmap_plat_exit);
+
+MODULE_DESCRIPTION("Richtek regmap Driver");
+MODULE_AUTHOR("Jeff Chang <jeff_chang@richtek.com>");
+MODULE_VERSION(RT_REGMAP_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/pd/richtek/tcpc_rt1711h.c b/drivers/usb/pd/richtek/tcpc_rt1711h.c
new file mode 100644
index 000000000000..d7f4ea0404ae
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpc_rt1711h.c
@@ -0,0 +1,1417 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Richtek RT1711H Type-C Port Control Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+#include <linux/hisi/log/hisi_log.h>
+
+#ifdef CONFIG_RT_REGMAP
+#include <linux/hisi/usb/pd/richtek/rt-regmap.h>
+#endif /* CONFIG_RT_REGMAP */
+#include <linux/sched/rt.h>
+
+/* #define DEBUG_GPIO 66 */
+
+#define RT1711H_DRV_VERSION "1.1.8_G"
+
+struct rt1711_chip {
+ struct i2c_client *client;
+ struct device *dev;
+#ifdef CONFIG_RT_REGMAP
+ struct rt_regmap_device *m_dev;
+#endif /* CONFIG_RT_REGMAP */
+ struct semaphore io_lock;
+ struct semaphore suspend_lock;
+ struct tcpc_desc *tcpc_desc;
+ struct tcpc_device *tcpc;
+ struct kthread_worker irq_worker;
+ struct kthread_work irq_work;
+ struct task_struct *irq_worker_task;
+
+ atomic_t poll_count;
+ struct delayed_work poll_work;
+
+ int irq_gpio;
+ int irq;
+ int chip_id;
+};
+
+#ifdef CONFIG_RT_REGMAP
+RT_REG_DECL(TCPC_V10_REG_VID, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_PID, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_DID, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TYPEC_REV, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_PD_REV, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_PDIF_REV, 2, RT_VOLATILE, {});
+
+RT_REG_DECL(TCPC_V10_REG_ALERT, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_ALERT_MASK, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_POWER_STATUS_MASK, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_FAULT_STATUS_MASK, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TCPC_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_ROLE_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_FAULT_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_POWER_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_CC_STATUS, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_POWER_STATUS, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_FAULT_STATUS, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_COMMAND, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_MSG_HDR_INFO, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_RX_DETECT, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_RX_BYTE_CNT, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_RX_BUF_FRAME_TYPE, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_RX_HDR, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_RX_DATA, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TRANSMIT, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TX_BYTE_CNT, 1, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TX_HDR, 2, RT_VOLATILE, {});
+RT_REG_DECL(TCPC_V10_REG_TX_DATA, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_CLK_CTRL2, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_CLK_CTRL3, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_BMC_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_BMCIO_RXDZSEL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_RT_STATUS, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_RT_INT, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_RT_MASK, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_IDLE_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_INTRST_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_WATCHDOG_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_I2CRST_CTRL, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_SWRESET, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_TTCPC_FILTER, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_DRP_TOGGLE_CYCLE, 1, RT_VOLATILE, {});
+RT_REG_DECL(RT1711H_REG_DRP_DUTY_CTRL, 1, RT_VOLATILE, {});
+
+static const rt_register_map_t rt1711_chip_regmap[] = {
+ RT_REG(TCPC_V10_REG_VID),
+ RT_REG(TCPC_V10_REG_PID),
+ RT_REG(TCPC_V10_REG_DID),
+ RT_REG(TCPC_V10_REG_TYPEC_REV),
+ RT_REG(TCPC_V10_REG_PD_REV),
+ RT_REG(TCPC_V10_REG_PDIF_REV),
+ RT_REG(TCPC_V10_REG_ALERT),
+ RT_REG(TCPC_V10_REG_ALERT_MASK),
+ RT_REG(TCPC_V10_REG_POWER_STATUS_MASK),
+ RT_REG(TCPC_V10_REG_FAULT_STATUS_MASK),
+ RT_REG(TCPC_V10_REG_TCPC_CTRL),
+ RT_REG(TCPC_V10_REG_ROLE_CTRL),
+ RT_REG(TCPC_V10_REG_FAULT_CTRL),
+ RT_REG(TCPC_V10_REG_POWER_CTRL),
+ RT_REG(TCPC_V10_REG_CC_STATUS),
+ RT_REG(TCPC_V10_REG_POWER_STATUS),
+ RT_REG(TCPC_V10_REG_FAULT_STATUS),
+ RT_REG(TCPC_V10_REG_COMMAND),
+ RT_REG(TCPC_V10_REG_MSG_HDR_INFO),
+ RT_REG(TCPC_V10_REG_RX_DETECT),
+ RT_REG(TCPC_V10_REG_RX_BYTE_CNT),
+ RT_REG(TCPC_V10_REG_RX_BUF_FRAME_TYPE),
+ RT_REG(TCPC_V10_REG_RX_HDR),
+ RT_REG(TCPC_V10_REG_RX_DATA),
+ RT_REG(TCPC_V10_REG_TRANSMIT),
+ RT_REG(TCPC_V10_REG_TX_BYTE_CNT),
+ RT_REG(TCPC_V10_REG_TX_HDR),
+ RT_REG(TCPC_V10_REG_TX_DATA),
+ RT_REG(RT1711H_REG_CLK_CTRL2),
+ RT_REG(RT1711H_REG_CLK_CTRL3),
+ RT_REG(RT1711H_REG_BMC_CTRL),
+ RT_REG(RT1711H_REG_BMCIO_RXDZSEL),
+ RT_REG(RT1711H_REG_RT_STATUS),
+ RT_REG(RT1711H_REG_RT_INT),
+ RT_REG(RT1711H_REG_RT_MASK),
+ RT_REG(RT1711H_REG_IDLE_CTRL),
+ RT_REG(RT1711H_REG_INTRST_CTRL),
+ RT_REG(RT1711H_REG_WATCHDOG_CTRL),
+ RT_REG(RT1711H_REG_I2CRST_CTRL),
+ RT_REG(RT1711H_REG_SWRESET),
+ RT_REG(RT1711H_REG_TTCPC_FILTER),
+ RT_REG(RT1711H_REG_DRP_TOGGLE_CYCLE),
+ RT_REG(RT1711H_REG_DRP_DUTY_CTRL),
+};
+
+#define RT1711_CHIP_REGMAP_SIZE ARRAY_SIZE(rt1711_chip_regmap)
+
+#endif /* CONFIG_RT_REGMAP */
+
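+/*
+ * Raw I2C accessors, used both directly and as the rt-regmap backend
+ * ops. Every access is retried up to five times with a ~100 us delay
+ * between attempts to ride out transient bus errors.
+ */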
+static int rt1711_read_device(void *client, u32 reg, int len, void *dst)
+{
+ struct i2c_client *i2c = (struct i2c_client *)client;
+ int ret = 0, count = 5;
+
+ while (count) {
+ if (len > 1) {
+ ret = i2c_smbus_read_i2c_block_data(i2c, reg, len, dst);
+ if (ret < 0)
+ count--;
+ else
+ return ret;
+ } else {
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0) {
+ count--;
+ } else {
+ *(u8 *)dst = (u8)ret;
+ return ret;
+ }
+ }
+ usleep_range(100, 120);
+ }
+ return ret;
+}
+
+static int rt1711_write_device(void *client, u32 reg, int len, const void *src)
+{
+ const u8 *data;
+ struct i2c_client *i2c = (struct i2c_client *)client;
+ int ret = 0, count = 5;
+
+ while (count) {
+ if (len > 1) {
+ ret = i2c_smbus_write_i2c_block_data(
+ i2c, reg, len, src);
+ if (ret < 0)
+ count--;
+ else
+ return ret;
+ } else {
+ data = src;
+ ret = i2c_smbus_write_byte_data(i2c, reg, *data);
+ if (ret < 0)
+ count--;
+ else
+ return ret;
+ }
+ usleep_range(100, 120);
+ }
+ return ret;
+}
+
+static int rt1711_reg_read(struct i2c_client *i2c, u8 reg)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(i2c);
+ u8 val = 0;
+ int ret = 0;
+
+#ifdef CONFIG_RT_REGMAP
+ ret = rt_regmap_block_read(chip->m_dev, reg, 1, &val);
+#else
+ ret = rt1711_read_device(chip->client, reg, 1, &val);
+#endif /* CONFIG_RT_REGMAP */
+ if (ret < 0) {
+ dev_err(chip->dev, "rt1711 reg read fail\n");
+ return ret;
+ }
+ return val;
+}
+
+static int rt1711_reg_write(struct i2c_client *i2c, u8 reg, const u8 data)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(i2c);
+ int ret = 0;
+
+#ifdef CONFIG_RT_REGMAP
+ ret = rt_regmap_block_write(chip->m_dev, reg, 1, &data);
+#else
+ ret = rt1711_write_device(chip->client, reg, 1, &data);
+#endif /* CONFIG_RT_REGMAP */
+ if (ret < 0)
+ dev_err(chip->dev, "rt1711 reg write fail\n");
+ return ret;
+}
+
+static int rt1711_block_read(struct i2c_client *i2c,
+ u8 reg, int len, void *dst)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(i2c);
+ int ret = 0;
+#ifdef CONFIG_RT_REGMAP
+ ret = rt_regmap_block_read(chip->m_dev, reg, len, dst);
+#else
+ ret = rt1711_read_device(chip->client, reg, len, dst);
+#endif /* #ifdef CONFIG_RT_REGMAP */
+ if (ret < 0)
+ dev_err(chip->dev, "rt1711 block read fail\n");
+ return ret;
+}
+
+static int rt1711_block_write(struct i2c_client *i2c,
+ u8 reg, int len, const void *src)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(i2c);
+ int ret = 0;
+#ifdef CONFIG_RT_REGMAP
+ ret = rt_regmap_block_write(chip->m_dev, reg, len, src);
+#else
+ ret = rt1711_write_device(chip->client, reg, len, src);
+#endif /* #ifdef CONFIG_RT_REGMAP */
+ if (ret < 0)
+ dev_err(chip->dev, "rt1711 block write fail\n");
+ return ret;
+}
+
+static int32_t rt1711_write_word(struct i2c_client *client,
+ u8 reg_addr, u16 data)
+{
+ int ret;
+
+ /* don't need swap */
+ ret = rt1711_block_write(client, reg_addr, 2, (u8 *)&data);
+ return ret;
+}
+
+static int32_t rt1711_read_word(struct i2c_client *client,
+ u8 reg_addr, u16 *data)
+{
+ int ret;
+
+ /* don't need swap */
+ ret = rt1711_block_read(client, reg_addr, 2, (u8 *)data);
+ return ret;
+}
+
+static inline int rt1711_i2c_write8(
+ struct tcpc_device *tcpc, u8 reg, const u8 data)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+
+ return rt1711_reg_write(chip->client, reg, data);
+}
+
+static inline int rt1711_i2c_write16(
+ struct tcpc_device *tcpc, u8 reg, const u16 data)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+
+ return rt1711_write_word(chip->client, reg, data);
+}
+
+static inline int rt1711_i2c_read8(struct tcpc_device *tcpc, u8 reg)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+
+ return rt1711_reg_read(chip->client, reg);
+}
+
+static inline int rt1711_i2c_read16(
+ struct tcpc_device *tcpc, u8 reg)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+ u16 data;
+ int ret;
+
+ ret = rt1711_read_word(chip->client, reg, &data);
+ if (ret < 0)
+ return ret;
+ return data;
+}
+
+#ifdef CONFIG_RT_REGMAP
+static struct rt_regmap_fops rt1711_regmap_fops = {
+ .read_device = rt1711_read_device,
+ .write_device = rt1711_write_device,
+};
+#endif /* CONFIG_RT_REGMAP */
+
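+/*
+ * Register an rt-regmap device on top of the raw I2C accessors. The
+ * map is registered with the cache disabled and I/O passed straight
+ * through, so the debugfs register nodes always reflect the hardware
+ * contents.
+ */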
+static int rt1711_regmap_init(struct rt1711_chip *chip)
+{
+#ifdef CONFIG_RT_REGMAP
+ struct rt_regmap_properties *props;
+ char name[32];
+ int len;
+
+ props = devm_kzalloc(chip->dev, sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return -ENOMEM;
+
+ props->register_num = RT1711_CHIP_REGMAP_SIZE;
+ props->rm = rt1711_chip_regmap;
+
+ props->rt_regmap_mode = RT_MULTI_BYTE | RT_CACHE_DISABLE |
+ RT_IO_PASS_THROUGH | RT_DBG_GENERAL;
+ snprintf(name, 32, "rt1711-%02x", chip->client->addr);
+
+ len = strlen(name);
+ props->name = kzalloc(len + 1, GFP_KERNEL);
+ props->aliases = kzalloc(len + 1, GFP_KERNEL);
+ strcpy((char *)props->name, name);
+ strcpy((char *)props->aliases, name);
+ props->io_log_en = 0;
+
+ chip->m_dev = rt_regmap_device_register(props,
+ &rt1711_regmap_fops, chip->dev, chip->client, chip);
+ if (!chip->m_dev) {
+ dev_err(chip->dev, "rt1711 chip rt_regmap register fail\n");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static int rt1711_regmap_deinit(struct rt1711_chip *chip)
+{
+#ifdef CONFIG_RT_REGMAP
+ rt_regmap_device_unregister(chip->m_dev);
+#endif
+ return 0;
+}
+
+static inline int rt1711_software_reset(struct tcpc_device *tcpc)
+{
+ int ret = rt1711_i2c_write8(tcpc, RT1711H_REG_SWRESET, 1);
+
+ if (ret < 0)
+ return ret;
+
+ mdelay(1);
+ return 0;
+}
+
+static inline int rt1711_command(struct tcpc_device *tcpc, u8 cmd)
+{
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_COMMAND, cmd);
+}
+
+static int rt1711_init_alert_mask(struct tcpc_device *tcpc)
+{
+ u16 mask;
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+
+ mask = TCPC_V10_REG_ALERT_CC_STATUS | TCPC_V10_REG_ALERT_POWER_STATUS;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ /* Need to handle RX overflow */
+ mask |= TCPC_V10_REG_ALERT_TX_SUCCESS | TCPC_V10_REG_ALERT_TX_DISCARDED
+ | TCPC_V10_REG_ALERT_TX_FAILED
+ | TCPC_V10_REG_ALERT_RX_HARD_RST
+ | TCPC_V10_REG_ALERT_RX_STATUS
+ | TCPC_V10_REG_RX_OVERFLOW;
+#endif
+
+ mask |= TCPC_REG_ALERT_FAULT;
+
+ return rt1711_write_word(chip->client, TCPC_V10_REG_ALERT_MASK, mask);
+}
+
+static int rt1711_init_power_status_mask(struct tcpc_device *tcpc)
+{
+ const u8 mask = TCPC_V10_REG_POWER_STATUS_VBUS_PRES;
+
+ return rt1711_i2c_write8(tcpc,
+ TCPC_V10_REG_POWER_STATUS_MASK, mask);
+}
+
+static int rt1711_init_fault_mask(struct tcpc_device *tcpc)
+{
+ const u8 mask =
+ TCPC_V10_REG_FAULT_STATUS_VCONN_OV |
+ TCPC_V10_REG_FAULT_STATUS_VCONN_OC;
+
+ return rt1711_i2c_write8(tcpc,
+ TCPC_V10_REG_FAULT_STATUS_MASK, mask);
+}
+
+static int rt1711_init_rt_mask(struct tcpc_device *tcpc)
+{
+ u8 rt_mask = 0;
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ rt_mask |= RT1711H_REG_M_VBUS_80;
+#endif /* CONFIG_TCPC_VSAFE0V_DETECT_IC */
+
+#ifdef CONFIG_TYPEC_CAP_RA_DETACH
+ if (tcpc->tcpc_flags & TCPC_FLAGS_CHECK_RA_DETACHE)
+ rt_mask |= RT1711H_REG_M_RA_DETACH;
+#endif /* CONFIG_TYPEC_CAP_RA_DETACH */
+
+#ifdef CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG
+ if (tcpc->tcpc_flags & TCPC_FLAGS_LPM_WAKEUP_WATCHDOG)
+ rt_mask |= RT1711H_REG_M_WAKEUP;
+#endif /* CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG */
+
+ return rt1711_i2c_write8(tcpc, RT1711H_REG_RT_MASK, rt_mask);
+}
+
+static inline void rt1711_poll_ctrl(struct rt1711_chip *chip)
+{
+ cancel_delayed_work_sync(&chip->poll_work);
+
+ if (atomic_read(&chip->poll_count) == 0) {
+ atomic_inc(&chip->poll_count);
+ cpu_idle_poll_ctrl(true);
+ }
+
+ schedule_delayed_work(
+ &chip->poll_work, msecs_to_jiffies(40));
+}
+
+static void rt1711_irq_work_handler(struct kthread_work *work)
+{
+ struct rt1711_chip *chip =
+ container_of(work, struct rt1711_chip, irq_work);
+ int regval = 0;
+ int gpio_val;
+
+ rt1711_poll_ctrl(chip);
+ /* make sure I2C bus had resumed */
+ down(&chip->suspend_lock);
+ tcpci_lock_typec(chip->tcpc);
+
+#ifdef DEBUG_GPIO
+ gpio_set_value(DEBUG_GPIO, 1);
+#endif
+
+ do {
+ regval = tcpci_alert(chip->tcpc);
+ if (regval)
+ break;
+ gpio_val = gpio_get_value(chip->irq_gpio);
+ } while (gpio_val == 0);
+
+ tcpci_unlock_typec(chip->tcpc);
+ up(&chip->suspend_lock);
+
+#ifdef DEBUG_GPIO
+ gpio_set_value(DEBUG_GPIO, 1);
+#endif
+}
+
+static void rt1711_poll_work(struct work_struct *work)
+{
+ struct rt1711_chip *chip = container_of(
+ work, struct rt1711_chip, poll_work.work);
+
+ if (atomic_dec_and_test(&chip->poll_count))
+ cpu_idle_poll_ctrl(false);
+}
+
+static irqreturn_t rt1711_intr_handler(int irq, void *data)
+{
+ struct rt1711_chip *chip = data;
+
+#ifdef DEBUG_GPIO
+ gpio_set_value(DEBUG_GPIO, 0);
+#endif
+ kthread_queue_work(&chip->irq_worker, &chip->irq_work);
+ return IRQ_HANDLED;
+}
+
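+/*
+ * Set up the ALERT interrupt: mask and clear any pending alert,
+ * request the interrupt GPIO, and service alerts from a dedicated
+ * SCHED_FIFO kthread worker; the hard IRQ handler only queues work,
+ * so the I2C transactions happen in the worker context.
+ */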
+static int rt1711_init_alert(struct tcpc_device *tcpc)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ int ret;
+ char *name;
+ int len;
+
+ /* Clear Alert Mask & Status */
+ rt1711_write_word(chip->client, TCPC_V10_REG_ALERT_MASK, 0);
+ rt1711_write_word(chip->client, TCPC_V10_REG_ALERT, 0xffff);
+
+ len = strlen(chip->tcpc_desc->name);
+ name = kzalloc(len + 5, GFP_KERNEL);
+ sprintf(name, "%s-IRQ", chip->tcpc_desc->name);
+
+ pr_info("%s name = %s\n", __func__, chip->tcpc_desc->name);
+ pr_info("%s gpio # = %d\n", __func__, chip->irq_gpio);
+
+ ret = gpio_request(chip->irq_gpio, name);
+#ifdef DEBUG_GPIO
+ gpio_request(DEBUG_GPIO, "debug_latency_pin");
+ gpio_direction_output(DEBUG_GPIO, 1);
+#endif
+ if (ret < 0) {
+ pr_err("Error: failed to request GPIO%d (ret = %d)\n",
+ chip->irq_gpio, ret);
+ return ret;
+ }
+ pr_info("GPIO requested...\n");
+
+ ret = gpio_direction_input(chip->irq_gpio);
+ if (ret < 0) {
+ pr_err("Error: failed to set GPIO%d as input pin(ret = %d)\n",
+ chip->irq_gpio, ret);
+ return ret;
+ }
+
+ chip->irq = gpio_to_irq(chip->irq_gpio);
+ pr_info("%s : IRQ number = %d\n", __func__, chip->irq);
+
+ /*
+ * ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
+ * rt1711_intr_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ * name, chip);
+ */
+
+ pr_info("%s : irq initialized...\n", __func__);
+
+ kthread_init_worker(&chip->irq_worker);
+ chip->irq_worker_task = kthread_run(kthread_worker_fn,
+ &chip->irq_worker, chip->tcpc_desc->name);
+ if (IS_ERR(chip->irq_worker_task)) {
+ pr_err("Error: Could not create tcpc task\n");
+ return -EINVAL;
+ }
+
+ sched_setscheduler(chip->irq_worker_task, SCHED_FIFO, &param);
+ kthread_init_work(&chip->irq_work, rt1711_irq_work_handler);
+
+ pr_info("IRQF_NO_THREAD Test\r\n");
+ ret = request_irq(
+ chip->irq, rt1711_intr_handler,
+ IRQF_TRIGGER_FALLING | IRQF_NO_THREAD |
+ IRQF_NO_SUSPEND, name, chip);
+ if (ret < 0) {
+ pr_err("Error: failed to request irq%d (gpio = %d, ret = %d)\n",
+ chip->irq, chip->irq_gpio, ret);
+ return ret;
+ }
+
+ enable_irq_wake(chip->irq);
+
+ return 0;
+}
+
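+/*
+ * Clock gating helper, driven from set_rx_enable(): the 300 kHz and
+ * 2.4 MHz divider clocks stay enabled either way, while the BMC
+ * RX/TX related clocks (BCLK/PCLK/24 MHz) are only kept running
+ * while PD message reception is enabled.
+ */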
+static inline int rt1711h_set_clock_gating(
+ struct tcpc_device *tcpc_dev, bool en)
+{
+ int ret = 0;
+
+#ifdef CONFIG_TCPC_CLOCK_GATING
+ u8 clk2 = RT1711H_REG_CLK_DIV_600K_EN
+ | RT1711H_REG_CLK_DIV_300K_EN | RT1711H_REG_CLK_CK_300K_EN;
+
+ u8 clk3 = RT1711H_REG_CLK_DIV_2P4M_EN;
+
+ if (!en) {
+ clk2 |=
+ RT1711H_REG_CLK_BCLK2_EN | RT1711H_REG_CLK_BCLK_EN;
+ clk3 |=
+ RT1711H_REG_CLK_CK_24M_EN | RT1711H_REG_CLK_PCLK_EN;
+ }
+
+ ret = rt1711_i2c_write8(tcpc_dev, RT1711H_REG_CLK_CTRL2, clk2);
+ if (ret == 0)
+ ret = rt1711_i2c_write8(tcpc_dev, RT1711H_REG_CLK_CTRL3, clk3);
+#endif /* CONFIG_TCPC_CLOCK_GATING */
+
+ return ret;
+}
+
+static inline int rt1711h_init_cc_params(
+ struct tcpc_device *tcpc, u8 cc_res)
+{
+ int rv = 0;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+#ifdef CONFIG_USB_PD_SNK_DFT_NO_GOOD_CRC
+ if (cc_res == TYPEC_CC_VOLT_SNK_DFT)
+ rv = rt1711_i2c_write8(tcpc, RT1711H_REG_BMCIO_RXDZSEL, 0x81);
+ else
+ rv = rt1711_i2c_write8(tcpc, RT1711H_REG_BMCIO_RXDZSEL, 0x80);
+#endif /* CONFIG_USB_PD_SNK_DFT_NO_GOOD_CRC */
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ return rv;
+}
+
+static int rt1711_tcpc_init(struct tcpc_device *tcpc, bool sw_reset)
+{
+ int ret;
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+
+ RT1711_INFO("\n");
+
+ if (sw_reset) {
+ ret = rt1711_software_reset(tcpc);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* CK_300K from 320K, SHIPPING off, AUTOIDLE enable, TIMEOUT = 32ms */
+ rt1711_i2c_write8(
+ tcpc, RT1711H_REG_IDLE_CTRL,
+ RT1711H_REG_IDLE_SET(0, 1, 1, 2));
+
+ /* UFP Both RD setting */
+ /* DRP = 0, RpVal = 0 (Default), Rd, Rd */
+ rt1711_i2c_write8(
+ tcpc, TCPC_V10_REG_ROLE_CTRL,
+ TCPC_V10_REG_ROLE_CTRL_RES_SET(0, 0, CC_RD, CC_RD));
+
+ if (chip->chip_id == RT1711H_DID_A) {
+ rt1711_i2c_write8(
+ tcpc, TCPC_V10_REG_FAULT_CTRL,
+ TCPC_V10_REG_FAULT_CTRL_DIS_VCONN_OV);
+ }
+
+ /*
+ * CC Detect Debounce : 26.7*val us
+ * Transition window count : spec 12~20us, based on 2.4MHz
+ * DRP Toggle Cycle : 51.2 + 6.4*val ms
+ * DRP Duty Ctrl : dcSRC: /1024
+ */
+
+ rt1711_i2c_write8(tcpc, RT1711H_REG_TTCPC_FILTER, 5);
+ rt1711_i2c_write8(tcpc, RT1711H_REG_DRP_TOGGLE_CYCLE, 4);
+ rt1711_i2c_write16(tcpc, RT1711H_REG_DRP_DUTY_CTRL, 400);
+
+ /* Vconn OC */
+ rt1711_i2c_write8(tcpc, RT1711H_REG_VCONN_CLIMITEN, 1);
+
+ /* RX/TX Clock Gating (Auto Mode)*/
+ if (!sw_reset)
+ rt1711h_set_clock_gating(tcpc, true);
+
+ tcpci_alert_status_clear(tcpc, 0xffffffff);
+
+ rt1711_init_power_status_mask(tcpc);
+ rt1711_init_alert_mask(tcpc);
+ rt1711_init_fault_mask(tcpc);
+ rt1711_init_rt_mask(tcpc);
+
+ return 0;
+}
+
+int rt1711_alert_status_clear(struct tcpc_device *tcpc, u32 mask)
+{
+ int ret;
+ u16 mask_t1;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ u8 mask_t2;
+#endif
+
+ /* Write 1 clear */
+ mask_t1 = (u16)mask;
+ ret = rt1711_i2c_write16(tcpc, TCPC_V10_REG_ALERT, mask_t1);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ mask_t2 = mask >> 16;
+ ret = rt1711_i2c_write8(tcpc, RT1711H_REG_RT_INT, mask_t2);
+ if (ret < 0)
+ return ret;
+#endif
+
+ return 0;
+}
+
+int rt1711_fault_status_clear(struct tcpc_device *tcpc, u8 status)
+{
+ /* Write 1 clear (Check it later )*/
+ int ret;
+
+ rt1711_i2c_write8(tcpc, TCPC_V10_REG_FAULT_STATUS, status);
+
+ /* discharge ... */
+ ret = rt1711_i2c_read8(tcpc, RT1711H_REG_BMC_CTRL);
+ if (ret < 0)
+ return ret;
+
+ rt1711_i2c_write8(
+ tcpc, RT1711H_REG_BMC_CTRL,
+ ret & (~RT1711H_REG_DISCHARGE_EN));
+
+ return 0;
+}
+
+int rt1711_get_alert_status(struct tcpc_device *tcpc, u32 *alert)
+{
+ int ret;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ u8 v2;
+#endif
+
+ ret = rt1711_i2c_read16(tcpc, TCPC_V10_REG_ALERT);
+ if (ret < 0)
+ return ret;
+
+ *alert = (u16)ret;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ ret = rt1711_i2c_read8(tcpc, RT1711H_REG_RT_INT);
+ if (ret < 0)
+ return ret;
+
+ v2 = (u8)ret;
+ *alert |= v2 << 16;
+#endif
+
+ return 0;
+}
+
+static int rt1711_get_power_status(
+ struct tcpc_device *tcpc, u16 *pwr_status)
+{
+ int ret;
+
+ ret = rt1711_i2c_read8(tcpc, TCPC_V10_REG_POWER_STATUS);
+ if (ret < 0)
+ return ret;
+
+ *pwr_status = 0;
+
+ if (ret & TCPC_V10_REG_POWER_STATUS_VBUS_PRES)
+ *pwr_status |= TCPC_REG_POWER_STATUS_VBUS_PRES;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ ret = rt1711_i2c_read8(tcpc, RT1711H_REG_RT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & RT1711H_REG_VBUS_80)
+ *pwr_status |= TCPC_REG_POWER_STATUS_EXT_VSAFE0V;
+#endif
+ return 0;
+}
+
+int rt1711_get_fault_status(struct tcpc_device *tcpc, u8 *status)
+{
+ int ret;
+
+ ret = rt1711_i2c_read8(tcpc, TCPC_V10_REG_FAULT_STATUS);
+ if (ret < 0)
+ return ret;
+ *status = (u8)ret;
+ return 0;
+}
+
+static int rt1711_get_cc(struct tcpc_device *tcpc, int *cc1, int *cc2)
+{
+ int status, role_ctrl, cc_role;
+ bool act_as_sink, act_as_drp;
+
+ status = rt1711_i2c_read8(tcpc, TCPC_V10_REG_CC_STATUS);
+ if (status < 0)
+ return status;
+
+ role_ctrl = rt1711_i2c_read8(tcpc, TCPC_V10_REG_ROLE_CTRL);
+ if (role_ctrl < 0)
+ return role_ctrl;
+
+ if (status & TCPC_V10_REG_CC_STATUS_DRP_TOGGLING) {
+ *cc1 = TYPEC_CC_DRP_TOGGLING;
+ *cc2 = TYPEC_CC_DRP_TOGGLING;
+ return 0;
+ }
+
+ *cc1 = TCPC_V10_REG_CC_STATUS_CC1(status);
+ *cc2 = TCPC_V10_REG_CC_STATUS_CC2(status);
+
+ act_as_drp = TCPC_V10_REG_ROLE_CTRL_DRP & role_ctrl;
+
+ if (act_as_drp) {
+ act_as_sink = TCPC_V10_REG_CC_STATUS_DRP_RESULT(status);
+ } else {
+ cc_role = TCPC_V10_REG_CC_STATUS_CC1(role_ctrl);
+ if (cc_role == TYPEC_CC_RP)
+ act_as_sink = false;
+ else
+ act_as_sink = true;
+ }
+
+ /*
+ * If status is not open, then OR in termination to convert to
+ * enum tcpc_cc_voltage_status.
+ */
+
+ if (*cc1 != TYPEC_CC_VOLT_OPEN)
+ *cc1 |= (act_as_sink << 2);
+
+ if (*cc2 != TYPEC_CC_VOLT_OPEN)
+ *cc2 |= (act_as_sink << 2);
+
+ rt1711h_init_cc_params(tcpc,
+ (u8)(tcpc->typec_polarity ? *cc2 : *cc1));
+
+ return 0;
+}
+
+static int rt1711_set_cc(struct tcpc_device *tcpc, int pull)
+{
+ int ret;
+ u8 data;
+ int rp_lvl = TYPEC_CC_PULL_GET_RP_LVL(pull);
+
+ RT1711_INFO("\n");
+ pull = TYPEC_CC_PULL_GET_RES(pull);
+ if (pull == TYPEC_CC_DRP) {
+ data = TCPC_V10_REG_ROLE_CTRL_RES_SET(
+ 1, rp_lvl, TYPEC_CC_RD, TYPEC_CC_RD);
+
+ ret = rt1711_i2c_write8(
+ tcpc, TCPC_V10_REG_ROLE_CTRL, data);
+
+ if (ret == 0)
+ ret = rt1711_command(tcpc, TCPM_CMD_LOOK_CONNECTION);
+ } else {
+ data = TCPC_V10_REG_ROLE_CTRL_RES_SET(0, rp_lvl, pull, pull);
+ ret = rt1711_i2c_write8(tcpc, TCPC_V10_REG_ROLE_CTRL, data);
+ }
+
+ return ret;
+}
+
+static int rt1711_set_polarity(struct tcpc_device *tcpc, int polarity)
+{
+ int data;
+
+ data = rt1711h_init_cc_params(tcpc,
+ tcpc->typec_remote_cc[polarity]);
+ if (data)
+ return data;
+
+ data = rt1711_i2c_read8(tcpc, TCPC_V10_REG_TCPC_CTRL);
+ if (data < 0)
+ return data;
+
+ data &= ~TCPC_V10_REG_TCPC_CTRL_PLUG_ORIENT;
+ data |= polarity ? TCPC_V10_REG_TCPC_CTRL_PLUG_ORIENT : 0;
+
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_TCPC_CTRL, data);
+}
+
+static int rt1711_set_vconn(struct tcpc_device *tcpc, int enable)
+{
+ int data;
+
+ data = rt1711_i2c_read8(tcpc, TCPC_V10_REG_POWER_CTRL);
+ if (data < 0)
+ return data;
+
+ data &= ~TCPC_V10_REG_POWER_CTRL_VCONN;
+ data |= enable ? TCPC_V10_REG_POWER_CTRL_VCONN : 0;
+
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_POWER_CTRL, data);
+}
+
+#ifdef CONFIG_TCPC_LOW_POWER_MODE
+static int rt1711_set_low_power_mode(
+ struct tcpc_device *tcpc_dev, bool en, int pull)
+{
+ int rv = 0;
+ u8 data;
+
+ if (en) {
+ data = RT1711H_REG_BMCIO_LPEN;
+
+ if (pull & TYPEC_CC_RP)
+ data |= RT1711H_REG_BMCIO_LPRPRD;
+ } else {
+ data = RT1711H_REG_BMCIO_BG_EN |
+ RT1711H_REG_VBUS_DET_EN | RT1711H_REG_BMCIO_OSC_EN;
+ }
+ rv = rt1711_i2c_write8(tcpc_dev, RT1711H_REG_BMC_CTRL, data);
+ return rv;
+}
+#endif /* CONFIG_TCPC_LOW_POWER_MODE */
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+static int rt1711_set_msg_header(
+ struct tcpc_device *tcpc, int power_role, int data_role)
+{
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_MSG_HDR_INFO,
+ TCPC_V10_REG_MSG_HDR_INFO_SET(data_role, power_role));
+}
+
+static int rt1711_set_rx_enable(struct tcpc_device *tcpc, u8 enable)
+{
+ int ret = rt1711h_set_clock_gating(tcpc, !enable);
+
+ if (ret == 0)
+ ret = rt1711_i2c_write8(tcpc, TCPC_V10_REG_RX_DETECT, enable);
+ return ret;
+}
+
+static int rt1711_get_message(
+ struct tcpc_device *tcpc, u32 *payload,
+ u16 *msg_head, enum tcpm_transmit_type *frame_type)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+ int rv;
+ u8 type, cnt = 0;
+ u8 buf[4];
+ const u16 alert_rx =
+ TCPC_V10_REG_ALERT_RX_STATUS | TCPC_V10_REG_RX_OVERFLOW;
+
+ rv = rt1711_block_read(chip->client,
+ TCPC_V10_REG_RX_BYTE_CNT, 4, buf);
+ cnt = buf[0];
+ type = buf[1];
+ *msg_head = *(u16 *)&buf[2];
+
+ /* TCPC 1.0 ==> no need to subtract the size of msg_head */
+ if (rv >= 0 && cnt > 0) {
+ cnt -= 3; /* MSG_HDR */
+ rv = rt1711_block_read(
+ chip->client, TCPC_V10_REG_RX_DATA, cnt,
+ (u8 *)payload);
+ }
+
+ *frame_type = (enum tcpm_transmit_type)type;
+
+ /* Read complete, clear RX status alert bit */
+ tcpci_alert_status_clear(tcpc, alert_rx);
+
+ /*mdelay(1); */
+ return rv;
+}
+
+static int rt1711_set_bist_carrier_mode(
+ struct tcpc_device *tcpc, u8 pattern)
+{
+ /* Don't support this function */
+ return 0;
+}
+
+/* message header (2byte) + data object (7*4) */
+#define RT1711_TRANSMIT_MAX_SIZE (sizeof(u16) + sizeof(u32) * 7)
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+static int rt1711_retransmit(struct tcpc_device *tcpc)
+{
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_TRANSMIT,
+ TCPC_V10_REG_TRANSMIT_SET(TCPC_TX_SOP));
+}
+#endif
+
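+/*
+ * Queue a PD message for transmission: the byte count, the 16-bit
+ * header and up to seven 32-bit data objects are packed into one
+ * buffer and written through TX_BYTE_CNT in a single block transfer,
+ * then the TRANSMIT register kicks off the send.
+ */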
+static int rt1711_transmit(struct tcpc_device *tcpc,
+ enum tcpm_transmit_type type,
+ u16 header, const u32 *data)
+{
+ struct rt1711_chip *chip = tcpc_get_dev_data(tcpc);
+ int rv;
+ int data_cnt, packet_cnt;
+ u8 temp[RT1711_TRANSMIT_MAX_SIZE + 1]; /* +1 for the leading TX byte count */
+
+ if (type < TCPC_TX_HARD_RESET) {
+ data_cnt = sizeof(u32) * PD_HEADER_CNT(header);
+ packet_cnt = data_cnt + sizeof(u16);
+
+ temp[0] = packet_cnt;
+ memcpy(temp + 1, (u8 *)&header, 2);
+ if (data_cnt > 0)
+ memcpy(temp + 3, (u8 *)data, data_cnt);
+
+ rv = rt1711_block_write(
+ chip->client,
+ TCPC_V10_REG_TX_BYTE_CNT,
+ packet_cnt + 1, (u8 *)temp);
+ if (rv < 0)
+ return rv;
+ }
+
+ rv = rt1711_i2c_write8(
+ tcpc, TCPC_V10_REG_TRANSMIT,
+ TCPC_V10_REG_TRANSMIT_SET(type));
+ return rv;
+}
+
+static int rt1711_set_bist_test_mode(struct tcpc_device *tcpc, bool en)
+{
+ int data;
+
+ data = rt1711_i2c_read8(tcpc, TCPC_V10_REG_TCPC_CTRL);
+ if (data < 0)
+ return data;
+
+ data &= ~TCPC_V10_REG_TCPC_CTRL_BIST_TEST_MODE;
+ data |= en ? TCPC_V10_REG_TCPC_CTRL_BIST_TEST_MODE : 0;
+
+ return rt1711_i2c_write8(tcpc, TCPC_V10_REG_TCPC_CTRL, data);
+}
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+static struct tcpc_ops rt1711_tcpc_ops = {
+ .init = rt1711_tcpc_init,
+ .alert_status_clear = rt1711_alert_status_clear,
+ .fault_status_clear = rt1711_fault_status_clear,
+ .get_alert_status = rt1711_get_alert_status,
+ .get_power_status = rt1711_get_power_status,
+ .get_fault_status = rt1711_get_fault_status,
+ .get_cc = rt1711_get_cc,
+ .set_cc = rt1711_set_cc,
+ .set_polarity = rt1711_set_polarity,
+ .set_vconn = rt1711_set_vconn,
+
+#ifdef CONFIG_TCPC_LOW_POWER_MODE
+ .set_low_power_mode = rt1711_set_low_power_mode,
+#endif
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ .set_msg_header = rt1711_set_msg_header,
+ .set_rx_enable = rt1711_set_rx_enable,
+ .get_message = rt1711_get_message,
+ .transmit = rt1711_transmit,
+ .set_bist_test_mode = rt1711_set_bist_test_mode,
+ .set_bist_carrier_mode = rt1711_set_bist_carrier_mode,
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ .retransmit = rt1711_retransmit,
+#endif /* CONFIG_USB_PD_RETRY_CRC_DISCARD */
+
+};
+
+static int rt_parse_dt(struct rt1711_chip *chip, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -EINVAL;
+
+ pr_info("%s\n", __func__);
+ chip->irq_gpio = of_get_named_gpio(np, "rt1711,irq_pin", 0);
+
+ return 0;
+}
+
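+/*
+ * Build the tcpc_desc from devicetree (default role, Rp level, port
+ * name) and register the TCPC device; older silicon revisions
+ * (chip_id <= RT1711H_DID_B) additionally enable the LPM wakeup
+ * watchdog, newer ones the Ra-detach check.
+ */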
+static int rt1711_tcpcdev_init(struct rt1711_chip *chip, struct device *dev)
+{
+ struct tcpc_desc *desc;
+ struct device_node *np = dev->of_node;
+ u32 val, len;
+ const char *name = "default";
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ if (of_property_read_u32(np, "rt-tcpc,role_def", &val) >= 0) {
+ if (val >= TYPEC_ROLE_NR)
+ desc->role_def = TYPEC_ROLE_DRP;
+ else
+ desc->role_def = val;
+ } else {
+ dev_info(dev, "use default Role DRP\n");
+ desc->role_def = TYPEC_ROLE_DRP;
+ }
+
+ if (of_property_read_u32(
+ np, "rt-tcpc,notifier_supply_num", &val) >= 0) {
+ /* val is unsigned, so no negative check is needed */
+ desc->notifier_supply_num = val;
+ } else {
+ desc->notifier_supply_num = 0;
+ }
+ if (of_property_read_u32(np, "rt-tcpc,rp_level", &val) >= 0) {
+ switch (val) {
+ case 0: /* RP Default */
+ desc->rp_lvl = TYPEC_CC_RP_DFT;
+ break;
+ case 1: /* RP 1.5V */
+ desc->rp_lvl = TYPEC_CC_RP_1_5;
+ break;
+ case 2: /* RP 3.0V */
+ desc->rp_lvl = TYPEC_CC_RP_3_0;
+ break;
+ default:
+ break;
+ }
+ }
+ of_property_read_string(np, "rt-tcpc,name", (char const **)&name);
+
+ len = strlen(name);
+ desc->name = kzalloc(len + 1, GFP_KERNEL);
+ strcpy((char *)desc->name, name);
+
+ chip->tcpc_desc = desc;
+
+ chip->tcpc = tcpc_device_register(dev,
+ desc, &rt1711_tcpc_ops, chip);
+ if (IS_ERR(chip->tcpc))
+ return -EINVAL;
+
+ if (chip->chip_id <= RT1711H_DID_B) {
+ chip->tcpc->tcpc_flags =
+ TCPC_FLAGS_RETRY_CRC_DISCARD |
+ TCPC_FLAGS_WAIT_HRESET_COMPLETE |
+ TCPC_FLAGS_LPM_WAKEUP_WATCHDOG;
+ } else {
+ chip->tcpc->tcpc_flags =
+ TCPC_FLAGS_RETRY_CRC_DISCARD |
+ TCPC_FLAGS_WAIT_HRESET_COMPLETE |
+ TCPC_FLAGS_CHECK_RA_DETACHE;
+ }
+ return 0;
+}
+
+#define RICHTEK_1711_VID 0x29cf
+#define RICHTEK_1711_PID 0x1711
+
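+/*
+ * Probe-time identification: verify the Richtek VID/PID, issue a
+ * software reset and return the device ID so the probe path can
+ * apply revision-specific settings.
+ */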
+static inline int rt1711h_check_revision(struct i2c_client *client)
+{
+ u16 vid, pid, did;
+ int ret;
+ u8 data = 1;
+
+ ret = rt1711_read_device(client, TCPC_V10_REG_VID, 2, &vid);
+ if (ret < 0) {
+ dev_err(&client->dev, "read chip ID fail\n");
+ return -EIO;
+ }
+
+ if (vid != RICHTEK_1711_VID) {
+ pr_info("%s failed, VID=0x%04x\n", __func__, vid);
+ return -ENODEV;
+ }
+
+ ret = rt1711_read_device(client, TCPC_V10_REG_PID, 2, &pid);
+ if (ret < 0) {
+ dev_err(&client->dev, "read product ID fail\n");
+ return -EIO;
+ }
+
+ if (pid != RICHTEK_1711_PID) {
+ pr_info("%s failed, PID=0x%04x\n", __func__, pid);
+ return -ENODEV;
+ }
+
+ ret = rt1711_write_device(client, RT1711H_REG_SWRESET, 1, &data);
+ if (ret < 0)
+ return ret;
+
+ mdelay(1);
+
+ ret = rt1711_read_device(client, TCPC_V10_REG_DID, 2, &did);
+ if (ret < 0) {
+ dev_err(&client->dev, "read device ID fail\n");
+ return -EIO;
+ }
+
+ return did;
+}
+
+static int rt1711_i2c_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rt1711_chip *chip;
+ int ret = 0, chip_id;
+ bool use_dt = client->dev.of_node;
+
+ pr_info("%s\n", __func__);
+
+ if (i2c_check_functionality(
+ client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ pr_info("I2C functionality : OK...\n");
+ else
+ pr_info("I2C functionality check : failuare...\n");
+
+ chip_id = rt1711h_check_revision(client);
+ if (chip_id < 0)
+ return chip_id;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (use_dt) {
+ rt_parse_dt(chip, &client->dev);
+ } else {
+ dev_err(&client->dev, "no dts node\n");
+ return -ENODEV;
+ }
+ chip->dev = &client->dev;
+ chip->client = client;
+ sema_init(&chip->io_lock, 1);
+ sema_init(&chip->suspend_lock, 1);
+ i2c_set_clientdata(client, chip);
+ INIT_DELAYED_WORK(&chip->poll_work, rt1711_poll_work);
+
+ chip->chip_id = chip_id;
+ pr_info("rt1711h_chipID = 0x%0x\n", chip_id);
+
+ ret = rt1711_regmap_init(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "rt1711 regmap init fail\n");
+ return -EINVAL;
+ }
+
+ ret = rt1711_tcpcdev_init(chip, &client->dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "rt1711 tcpc dev init fail\n");
+ goto err_tcpc_reg;
+ }
+
+ ret = rt1711_init_alert(chip->tcpc);
+ if (ret < 0) {
+ pr_err("rt1711 init alert fail\n");
+ goto err_irq_init;
+ }
+
+ tcpc_schedule_init_work(chip->tcpc);
+
+ pr_info("%s probe OK!\n", __func__);
+ return 0;
+
+err_irq_init:
+ tcpc_device_unregister(chip->dev, chip->tcpc);
+err_tcpc_reg:
+ rt1711_regmap_deinit(chip);
+ return ret;
+}
+
+static int rt1711_i2c_remove(struct i2c_client *client)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(client);
+
+ if (chip) {
+ cancel_delayed_work_sync(&chip->poll_work);
+
+ tcpc_device_unregister(chip->dev, chip->tcpc);
+ rt1711_regmap_deinit(chip);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt1711_i2c_suspend(struct device *dev)
+{
+ struct rt1711_chip *chip;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client) {
+ chip = i2c_get_clientdata(client);
+ if (chip)
+ down(&chip->suspend_lock);
+ }
+
+ return 0;
+}
+
+static int rt1711_i2c_resume(struct device *dev)
+{
+ struct rt1711_chip *chip;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client) {
+ chip = i2c_get_clientdata(client);
+ if (chip)
+ up(&chip->suspend_lock);
+ }
+
+ return 0;
+}
+
+static void rt1711_shutdown(struct i2c_client *client)
+{
+ struct rt1711_chip *chip = i2c_get_clientdata(client);
+
+ /* Please reset IC here */
+ if (chip && chip->irq)
+ disable_irq(chip->irq);
+ i2c_smbus_write_byte_data(client, RT1711H_REG_SWRESET, 0x01);
+}
+
+static int rt1711_pm_suspend_runtime(struct device *device)
+{
+ dev_dbg(device, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int rt1711_pm_resume_runtime(struct device *device)
+{
+ dev_dbg(device, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops rt1711_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ rt1711_i2c_suspend,
+ rt1711_i2c_resume)
+ SET_RUNTIME_PM_OPS(
+ rt1711_pm_suspend_runtime,
+ rt1711_pm_resume_runtime,
+ NULL
+ )
+};
+
+#define RT1711_PM_OPS (&rt1711_pm_ops)
+#else
+#define RT1711_PM_OPS (NULL)
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id rt1711_id_table[] = {
+ {"rt1711", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, rt1711_id_table);
+
+static const struct of_device_id rt_match_table[] = {
+ {.compatible = "richtek,rt1711",},
+ {},
+};
+
+static struct i2c_driver rt1711_driver = {
+ .driver = {
+ .name = "rt1711h",
+ .owner = THIS_MODULE,
+ .of_match_table = rt_match_table,
+ .pm = RT1711_PM_OPS,
+ },
+ .probe = rt1711_i2c_probe,
+ .remove = rt1711_i2c_remove,
+ .shutdown = rt1711_shutdown,
+ .id_table = rt1711_id_table,
+};
+
+static int __init rt1711_init(void)
+{
+ struct device_node *np;
+
+ pr_info("rt1711h_init (%s): initializing...\n", RT1711H_DRV_VERSION);
+ np = of_find_node_by_name(NULL, "rt1711");
+ if (np)
+ pr_info("rt1711h node found...\n");
+ else
+ pr_info("rt1711h node not found...\n");
+
+ return i2c_add_driver(&rt1711_driver);
+}
+module_init(rt1711_init);
+
+static void __exit rt1711_exit(void)
+{
+ i2c_del_driver(&rt1711_driver);
+}
+module_exit(rt1711_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jeff Chang <jeff_chang@richtek.com>");
+MODULE_DESCRIPTION("RT1711 TCPC Driver");
+MODULE_VERSION(RT1711H_DRV_VERSION);
diff --git a/drivers/usb/pd/richtek/tcpci_alert.c b/drivers/usb/pd/richtek/tcpci_alert.c
new file mode 100644
index 000000000000..566419ca07cb
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpci_alert.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * TCPC Interface for alert handler
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+static int tcpci_alert_cc_changed(struct tcpc_device *tcpc_dev)
+{
+ return tcpc_typec_handle_cc_change(tcpc_dev);
+}
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+
+static inline int tcpci_alert_vsafe0v(struct tcpc_device *tcpc_dev)
+{
+ tcpc_typec_handle_vsafe0v(tcpc_dev);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ pd_put_vbus_safe0v_event(tcpc_dev);
+#endif
+
+ return 0;
+}
+
+#endif /* CONFIG_TCPC_VSAFE0V_DETECT_IC */
+
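+/*
+ * Derive the cached vbus_level from a freshly read POWER_STATUS:
+ * VBUS present, invalid, or (with the vendor vSafe0V detection
+ * enabled) confirmed below the vSafe0V threshold.
+ */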
+void tcpci_vbus_level_init(struct tcpc_device *tcpc_dev, u16 power_status)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+
+ tcpc_dev->vbus_level =
+ power_status & TCPC_REG_POWER_STATUS_VBUS_PRES ?
+ TCPC_VBUS_VALID : TCPC_VBUS_INVALID;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ if (power_status & TCPC_REG_POWER_STATUS_EXT_VSAFE0V) {
+ if (tcpc_dev->vbus_level == TCPC_VBUS_INVALID)
+ tcpc_dev->vbus_level = TCPC_VBUS_SAFE0V;
+ else
+ TCPC_INFO("ps_confused: 0x%02x\r\n", power_status);
+ }
+#endif
+
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+static int tcpci_alert_power_status_changed(struct tcpc_device *tcpc_dev)
+{
+ int rv = 0;
+ u16 power_status = 0;
+
+ rv = tcpci_get_power_status(tcpc_dev, &power_status);
+ if (rv < 0)
+ return rv;
+
+ tcpci_vbus_level_init(tcpc_dev, power_status);
+
+ TCPC_INFO("ps_change=%d\r\n", tcpc_dev->vbus_level);
+ rv = tcpc_typec_handle_ps_change(
+ tcpc_dev,
+ tcpc_dev->vbus_level == TCPC_VBUS_VALID);
+ if (rv < 0)
+ return rv;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ pd_put_vbus_changed_event(tcpc_dev, true);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ if (tcpc_dev->vbus_level == TCPC_VBUS_SAFE0V)
+ rv = tcpci_alert_vsafe0v(tcpc_dev);
+#endif /* CONFIG_TCPC_VSAFE0V_DETECT_IC */
+
+ return rv;
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+static int tcpci_alert_tx_success(struct tcpc_device *tcpc_dev)
+{
+ u8 tx_state;
+
+ pd_event_t evt = {
+ .event_type = PD_EVT_CTRL_MSG,
+ .msg = PD_CTRL_GOOD_CRC,
+ .pd_msg = NULL,
+ };
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tx_state = tcpc_dev->pd_transmit_state;
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_GOOD_CRC;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ if (tx_state == PD_TX_STATE_WAIT_CRC_VDM)
+ pd_put_vdm_event(tcpc_dev, &evt, false);
+ else
+ pd_put_event(tcpc_dev, &evt, false);
+
+ return 0;
+}
+
+static int tcpci_alert_tx_failed(struct tcpc_device *tcpc_dev)
+{
+ u8 tx_state;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tx_state = tcpc_dev->pd_transmit_state;
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_NO_GOOD_CRC;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ if (tx_state == PD_TX_STATE_WAIT_CRC_VDM)
+ vdm_put_hw_event(tcpc_dev, PD_HW_TX_FAILED);
+ else
+ pd_put_hw_event(tcpc_dev, PD_HW_TX_FAILED);
+
+ return 0;
+}
+
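+/*
+ * A discarded transmission either schedules a deferred retry (when
+ * the TCPC advertises TCPC_FLAGS_RETRY_CRC_DISCARD) or is reported
+ * to the PD stack as a plain TX failure.
+ */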
+static int tcpci_alert_tx_discard(struct tcpc_device *tcpc_dev)
+{
+ u8 tx_state;
+ bool retry_crc_discard = false;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tx_state = tcpc_dev->pd_transmit_state;
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_DISCARD;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ TCPC_INFO("Discard\r\n");
+
+ if (tx_state == PD_TX_STATE_WAIT_CRC_VDM) {
+ pd_put_last_vdm_event(tcpc_dev);
+ } else {
+ retry_crc_discard =
+ (tcpc_dev->tcpc_flags &
+ TCPC_FLAGS_RETRY_CRC_DISCARD) != 0;
+
+ if (retry_crc_discard) {
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ tcpc_dev->pd_discard_pending = true;
+ tcpc_enable_timer(tcpc_dev, PD_TIMER_DISCARD);
+#else
+ TCPC_ERR("RETRY_CRC_DISCARD\r\n");
+#endif
+ } else {
+ pd_put_hw_event(tcpc_dev, PD_HW_TX_FAILED);
+ }
+ }
+ return 0;
+}
+
+static int tcpci_alert_recv_msg(struct tcpc_device *tcpc_dev)
+{
+ int retval;
+ pd_msg_t *pd_msg;
+ enum tcpm_transmit_type type;
+
+ pd_msg = pd_alloc_msg(tcpc_dev);
+ if (!pd_msg)
+ return -1; /* TODO */
+
+ retval = tcpci_get_message(tcpc_dev,
+ pd_msg->payload, &pd_msg->msg_hdr, &type);
+ if (retval < 0) {
+ TCPC_INFO("recv_msg failed: %d\r\n", retval);
+ pd_free_msg(tcpc_dev, pd_msg);
+ return retval;
+ }
+
+ pd_msg->frame_type = (u8)type;
+ pd_put_pd_msg_event(tcpc_dev, pd_msg);
+ return 0;
+}
+
+static int tcpci_alert_rx_overflow(struct tcpc_device *tcpc_dev)
+{
+ TCPC_INFO("RX_OVERFLOW\r\n");
+ return 0;
+}
+
+static int tcpci_alert_fault(struct tcpc_device *tcpc_dev)
+{
+ u8 status = 0;
+
+ tcpci_get_fault_status(tcpc_dev, &status);
+ TCPC_INFO("FaultAlert=0x%x\r\n", status);
+ tcpci_fault_status_clear(tcpc_dev, status);
+ return 0;
+}
+
+static int tcpci_alert_recv_hard_reset(struct tcpc_device *tcpc_dev)
+{
+ TCPC_INFO("HardResetAlert\r\n");
+ pd_put_recv_hard_reset_event(tcpc_dev);
+ return 0;
+}
+
+#ifdef CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG
+static int tcpci_alert_wakeup(struct tcpc_device *tcpc_dev)
+{
+ if (tcpc_dev->tcpc_flags & TCPC_FLAGS_LPM_WAKEUP_WATCHDOG) {
+ TCPC_DBG("Wakeup\r\n");
+
+ if (tcpc_dev->typec_remote_cc[0] == TYPEC_CC_DRP_TOGGLING &&
+ tcpc_dev->typec_remote_cc[1] == TYPEC_CC_DRP_TOGGLING)
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_WAKEUP);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG */
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+typedef struct __tcpci_alert_handler {
+ u32 bit_mask;
+ int (*handler)(struct tcpc_device *tcpc_dev);
+} tcpci_alert_handler_t;
+
+#define DECL_TCPCI_ALERT_HANDLER(xbit, xhandler) {\
+ .bit_mask = 1 << (xbit),\
+ .handler = xhandler, \
+ }
+
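+/*
+ * Alert dispatch table. bit_mask follows the TCPC ALERT register bit
+ * layout (bit 0 CC status, bit 1 power status, bits 2..10 PD RX/TX,
+ * hard reset, fault and RX overflow); bits 16 and above carry the
+ * vendor status byte that the chip driver shifts into the upper half
+ * of the 32-bit alert word.
+ */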
+const tcpci_alert_handler_t tcpci_alert_handlers[] = {
+#ifdef CONFIG_USB_POWER_DELIVERY
+ DECL_TCPCI_ALERT_HANDLER(4, tcpci_alert_tx_failed),
+ DECL_TCPCI_ALERT_HANDLER(5, tcpci_alert_tx_discard),
+ DECL_TCPCI_ALERT_HANDLER(6, tcpci_alert_tx_success),
+ DECL_TCPCI_ALERT_HANDLER(2, tcpci_alert_recv_msg),
+ DECL_TCPCI_ALERT_HANDLER(7, NULL),
+ DECL_TCPCI_ALERT_HANDLER(8, NULL),
+ DECL_TCPCI_ALERT_HANDLER(3, tcpci_alert_recv_hard_reset),
+ DECL_TCPCI_ALERT_HANDLER(10, tcpci_alert_rx_overflow),
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#ifdef CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG
+ DECL_TCPCI_ALERT_HANDLER(16, tcpci_alert_wakeup),
+#endif /* CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG */
+ DECL_TCPCI_ALERT_HANDLER(9, tcpci_alert_fault),
+ DECL_TCPCI_ALERT_HANDLER(0, tcpci_alert_cc_changed),
+ DECL_TCPCI_ALERT_HANDLER(1, tcpci_alert_power_status_changed),
+};
+
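+/*
+ * Core alert handler: read the pending alert bits and clear all but
+ * the RX bits (those are cleared once the message has actually been
+ * fetched), fold the vendor VBUS_80/RA_DETACH bits into the standard
+ * power/CC status bits, then walk the dispatch table above.
+ */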
+static inline int __tcpci_alert(struct tcpc_device *tcpc_dev)
+{
+ int rv, i;
+ u32 alert_status;
+ const u32 alert_rx =
+ TCPC_REG_ALERT_RX_STATUS | TCPC_REG_ALERT_RX_BUF_OVF;
+
+ const u32 alert_sent_hreset =
+ TCPC_REG_ALERT_TX_SUCCESS | TCPC_REG_ALERT_TX_FAILED;
+
+ rv = tcpci_get_alert_status(tcpc_dev, &alert_status);
+ if (rv)
+ return rv;
+
+ tcpci_alert_status_clear(tcpc_dev, alert_status & (~alert_rx));
+
+ if (tcpc_dev->typec_role == TYPEC_ROLE_UNKNOWN) {
+ TCPC_INFO("SkipAlert:0x%04x\r\n", alert_status);
+ return 0;
+ }
+
+ if (alert_status & TCPC_REG_ALERT_EXT_VBUS_80)
+ alert_status |= TCPC_REG_ALERT_POWER_STATUS;
+
+#ifdef CONFIG_TYPEC_CAP_RA_DETACH
+ if ((alert_status & TCPC_REG_ALERT_EXT_RA_DETACH) &&
+ (tcpc_dev->tcpc_flags & TCPC_FLAGS_CHECK_RA_DETACHE))
+ alert_status |= TCPC_REG_ALERT_CC_STATUS;
+#endif /* CONFIG_TYPEC_CAP_RA_DETACH */
+
+#ifdef CONFIG_USB_PD_IGNORE_HRESET_COMPLETE_TIMER
+ if ((alert_status & alert_sent_hreset) == alert_sent_hreset) {
+ if (tcpc_dev->tcpc_flags & TCPC_FLAGS_WAIT_HRESET_COMPLETE) {
+ alert_status &= ~alert_sent_hreset;
+ pd_put_sent_hard_reset_event(tcpc_dev);
+ }
+ }
+#endif /* CONFIG_USB_PD_IGNORE_HRESET_COMPLETE_TIMER */
+
+ for (i = 0; i < ARRAY_SIZE(tcpci_alert_handlers); i++) {
+ if (tcpci_alert_handlers[i].bit_mask & alert_status) {
+ if (tcpci_alert_handlers[i].handler != 0)
+ tcpci_alert_handlers[i].handler(tcpc_dev);
+ }
+ }
+ return 0;
+}
+
+int tcpci_alert(struct tcpc_device *tcpc_dev)
+{
+ int ret;
+
+ ret = __tcpci_alert(tcpc_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(tcpci_alert);
+
+/*
+ * [BLOCK] TYPEC device changed
+ */
+
+static inline int tcpci_report_usb_port_attached(struct tcpc_device *tcpc)
+{
+ TCPC_INFO("usb_port_attached\r\n");
+
+ __pm_relax(&tcpc->dettach_temp_wake_lock);
+ __pm_stay_awake(&tcpc->attach_wake_lock);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ pd_put_cc_attached_event(tcpc, tcpc->typec_attach_new);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ return 0;
+}
+
+static inline int tcpci_report_usb_port_detached(struct tcpc_device *tcpc)
+{
+ TCPC_INFO("usb_port_detached\r\n");
+
+ __pm_wakeup_event(&tcpc->dettach_temp_wake_lock,
+ jiffies_to_msecs(msecs_to_jiffies(5 * 1000)));
+ __pm_relax(&tcpc->attach_wake_lock);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ pd_put_cc_detached_event(tcpc);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ return 0;
+}
+
+int tcpci_report_usb_port_changed(struct tcpc_device *tcpc)
+{
+ tcpci_notify_typec_state(tcpc);
+
+ if (tcpc->typec_attach_old == TYPEC_UNATTACHED)
+ tcpci_report_usb_port_attached(tcpc);
+ else if (tcpc->typec_attach_new == TYPEC_UNATTACHED)
+ tcpci_report_usb_port_detached(tcpc);
+ else
+ TCPC_DBG("TCPC Attach Again\r\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(tcpci_report_usb_port_changed);
diff --git a/drivers/usb/pd/richtek/tcpci_core.c b/drivers/usb/pd/richtek/tcpci_core.c
new file mode 100644
index 000000000000..5c24d8ce426c
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpci_core.c
@@ -0,0 +1,634 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Richtek TypeC Port Control Interface Core Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/gpio.h>
+#include <linux/hisi/log/hisi_log.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+#include "pd_dpm_prv.h"
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#define TCPC_CORE_VERSION "1.1.1_G"
+
+static ssize_t tcpc_show_property(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t tcpc_store_property(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#define TCPC_DEVICE_ATTR(_name, _mode) \
+{ \
+ .attr = { .name = #_name, .mode = _mode }, \
+ .show = tcpc_show_property, \
+ .store = tcpc_store_property, \
+}
+
+static struct class *tcpc_class;
+EXPORT_SYMBOL_GPL(tcpc_class);
+
+static struct device_type tcpc_dev_type;
+
+static struct device_attribute tcpc_device_attributes[] = {
+ TCPC_DEVICE_ATTR(role_def, 0444),
+ TCPC_DEVICE_ATTR(rp_lvl, 0444),
+ TCPC_DEVICE_ATTR(pd_test, 0664),
+ TCPC_DEVICE_ATTR(info, 0444),
+ TCPC_DEVICE_ATTR(timer, 0664),
+ TCPC_DEVICE_ATTR(caps_info, 0444),
+ TCPC_DEVICE_ATTR(cc_orient_info, 0444),
+ TCPC_DEVICE_ATTR(remote_rp_lvl, 0444),
+};
+
+enum {
+ TCPC_DESC_ROLE_DEF = 0,
+ TCPC_DESC_RP_LEVEL,
+ TCPC_DESC_PD_TEST,
+ TCPC_DESC_INFO,
+ TCPC_DESC_TIMER,
+ TCPC_DESC_CAP_INFO,
+ TCPC_DESC_CC_ORIENT_INFO,
+ TCPC_DESC_REMOTE_RP_LEVEL,
+};
+
+static struct attribute *__tcpc_attrs[ARRAY_SIZE(tcpc_device_attributes) + 1];
+static struct attribute_group tcpc_attr_group = {
+ .attrs = __tcpc_attrs,
+};
+
+static const struct attribute_group *tcpc_attr_groups[] = {
+ &tcpc_attr_group,
+ NULL,
+};
+
+static const char * const role_text[] = {
+ "SNK Only",
+ "SRC Only",
+ "DRP",
+ "Try.SRC",
+ "Try.SNK",
+};
+
+static ssize_t tcpc_show_property(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tcpc_device *tcpc = to_tcpc_device(dev);
+ const ptrdiff_t offset = attr - tcpc_device_attributes;
+ int i = 0;
+ int vmin, vmax, ioper;
+ u8 cc1, cc2;
+ bool from_ic = true;
+ char cc1_buf[32] = {0};
+ char cc2_buf[32] = {0};
+
+ switch (offset) {
+ case TCPC_DESC_CC_ORIENT_INFO:
+ snprintf(buf, 256, "%s\n", tcpc->typec_polarity ? "2" : "1");
+ TCPC_DBG("%s typec_polarity=%s\n", __func__, buf);
+ break;
+ case TCPC_DESC_CAP_INFO:
+ snprintf(buf + strlen(buf), 256, "%s = %d\n%s = %d\n",
+ "local_selected_cap",
+ tcpc->pd_port.local_selected_cap,
+ "remote_selected_cap",
+ tcpc->pd_port.remote_selected_cap);
+
+ snprintf(buf + strlen(buf), 256, "%s\n",
+ "local_src_cap(vmin, vmax, ioper)");
+ for (i = 0; i < tcpc->pd_port.local_src_cap.nr; i++) {
+ pd_extract_pdo_power(
+ tcpc->pd_port.local_src_cap.pdos[i],
+ &vmin, &vmax, &ioper);
+ snprintf(buf + strlen(buf), 256, "%d %d %d\n",
+ vmin, vmax, ioper);
+ }
+ snprintf(buf + strlen(buf), 256, "%s\n",
+ "local_snk_cap(vmin, vmax, ioper)");
+ for (i = 0; i < tcpc->pd_port.local_snk_cap.nr; i++) {
+ pd_extract_pdo_power(
+ tcpc->pd_port.local_snk_cap.pdos[i],
+ &vmin, &vmax, &ioper);
+ snprintf(buf + strlen(buf), 256, "%d %d %d\n",
+ vmin, vmax, ioper);
+ }
+ snprintf(buf + strlen(buf), 256, "%s\n",
+ "remote_src_cap(vmin, vmax, ioper)");
+ for (i = 0; i < tcpc->pd_port.remote_src_cap.nr; i++) {
+ pd_extract_pdo_power(
+ tcpc->pd_port.remote_src_cap.pdos[i],
+ &vmin, &vmax, &ioper);
+ snprintf(buf + strlen(buf), 256, "%d %d %d\n",
+ vmin, vmax, ioper);
+ }
+ snprintf(buf + strlen(buf), 256, "%s\n",
+ "remote_snk_cap(vmin, vmax, ioper)");
+ for (i = 0; i < tcpc->pd_port.remote_snk_cap.nr; i++) {
+ pd_extract_pdo_power(
+ tcpc->pd_port.remote_snk_cap.pdos[i],
+ &vmin, &vmax, &ioper);
+ snprintf(buf + strlen(buf), 256, "%d %d %d\n",
+ vmin, vmax, ioper);
+ }
+ break;
+ case TCPC_DESC_ROLE_DEF:
+ snprintf(buf, 256, "%s\n", role_text[tcpc->desc.role_def]);
+ break;
+ case TCPC_DESC_RP_LEVEL:
+ if (tcpc->typec_local_rp_level == TYPEC_CC_RP_DFT)
+ snprintf(buf, 256, "%s\n", "Default");
+ else if (tcpc->typec_local_rp_level == TYPEC_CC_RP_1_5)
+ snprintf(buf, 256, "%s\n", "1.5");
+ else if (tcpc->typec_local_rp_level == TYPEC_CC_RP_3_0)
+ snprintf(buf, 256, "%s\n", "3.0");
+ break;
+ case TCPC_DESC_REMOTE_RP_LEVEL:
+ tcpm_inquire_remote_cc(tcpc, &cc1, &cc2, from_ic);
+
+ if (cc1 == TYPEC_CC_VOLT_OPEN)
+ snprintf(cc1_buf, 256, "%s\n", "OPEN");
+ else if (cc1 == TYPEC_CC_VOLT_RA)
+ snprintf(cc1_buf, 256, "%s\n", "RA");
+ else if (cc1 == TYPEC_CC_VOLT_RD)
+ snprintf(cc1_buf, 256, "%s\n", "RD");
+ else if (cc1 == TYPEC_CC_VOLT_SNK_DFT)
+ snprintf(cc1_buf, 256, "%s\n", "Default");
+ else if (cc1 == TYPEC_CC_VOLT_SNK_1_5)
+ snprintf(cc1_buf, 256, "%s\n", "1.5");
+ else if (cc1 == TYPEC_CC_VOLT_SNK_3_0)
+ snprintf(cc1_buf, 256, "%s\n", "3.0");
+ else if (cc1 == TYPEC_CC_DRP_TOGGLING)
+ snprintf(cc1_buf, 256, "%s\n", "DRP");
+ else
+ snprintf(cc1_buf, 256, "%s\n", "NULL");
+
+ if (cc2 == TYPEC_CC_VOLT_OPEN)
+ snprintf(cc2_buf, 256, "%s\n", "OPEN");
+ else if (cc2 == TYPEC_CC_VOLT_RA)
+ snprintf(cc2_buf, 256, "%s\n", "RA");
+ else if (cc2 == TYPEC_CC_VOLT_RD)
+ snprintf(cc2_buf, 256, "%s\n", "RD");
+ else if (cc2 == TYPEC_CC_VOLT_SNK_DFT)
+ snprintf(cc2_buf, 256, "%s\n", "Default");
+ else if (cc2 == TYPEC_CC_VOLT_SNK_1_5)
+ snprintf(cc2_buf, 256, "%s\n", "1.5");
+ else if (cc2 == TYPEC_CC_VOLT_SNK_3_0)
+ snprintf(cc2_buf, 256, "%s\n", "3.0");
+ else if (cc2 == TYPEC_CC_DRP_TOGGLING)
+ snprintf(cc2_buf, 256, "%s\n", "DRP");
+ else
+ snprintf(cc2_buf, 256, "%s\n", "NULL");
+
+ snprintf(buf, 256, " cc1 %s cc2 %s\n", cc1_buf, cc2_buf);
+
+ break;
+ case TCPC_DESC_PD_TEST:
+ snprintf(buf,
+ 256, "%s\n%s\n%s\n%s\n%s\n", "1: Power Role Swap Test",
+ "2: Data Role Swap Test", "3: Vconn Swap Test",
+ "4: soft reset", "5: hard reset");
+ break;
+ case TCPC_DESC_INFO:
+ i += snprintf(buf + i,
+ 256, "|^|==( %s info )==|^|\n", tcpc->desc.name);
+ i += snprintf(buf + i,
+ 256, "role = %s\n", role_text[tcpc->desc.role_def]);
+ if (tcpc->typec_local_rp_level == TYPEC_CC_RP_DFT)
+ i += snprintf(buf + i, 256, "rplvl = %s\n", "Default");
+ else if (tcpc->typec_local_rp_level == TYPEC_CC_RP_1_5)
+ i += snprintf(buf + i, 256, "rplvl = %s\n", "1.5");
+ else if (tcpc->typec_local_rp_level == TYPEC_CC_RP_3_0)
+ i += snprintf(buf + i, 256, "rplvl = %s\n", "3.0");
+ break;
+ default:
+ break;
+ }
+ return strlen(buf);
+}
+
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+ char *token;
+ int base, cnt;
+
+ token = strsep(&buf, " ");
+
+ for (cnt = 0; cnt < num_of_par; cnt++) {
+ if (token) {
+ if ((token[1] == 'x') || (token[1] == 'X'))
+ base = 16;
+ else
+ base = 10;
+
+ if (kstrtoul(token, base, &param1[cnt]) != 0)
+ return -EINVAL;
+
+ token = strsep(&buf, " ");
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static ssize_t tcpc_store_property(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tcpc_device *tcpc = to_tcpc_device(dev);
+ struct tcpm_power_cap cap;
+ const ptrdiff_t offset = attr - tcpc_device_attributes;
+ int ret;
+ long int val;
+
+ switch (offset) {
+ case TCPC_DESC_ROLE_DEF:
+ ret = get_parameters((char *)buf, &val, 1);
+ if (ret < 0) {
+ dev_err(dev, "get parameters fail\n");
+ return -EINVAL;
+ }
+
+ tcpm_typec_change_role(tcpc, val);
+ break;
+ case TCPC_DESC_TIMER:
+ ret = get_parameters((char *)buf, &val, 1);
+ if (ret < 0) {
+ dev_err(dev, "get parameters fail\n");
+ return -EINVAL;
+ }
+ #ifdef CONFIG_USB_POWER_DELIVERY
+ if (val > 0 && val <= PD_PE_TIMER_END_ID)
+ pd_enable_timer(&tcpc->pd_port, val);
+ else if (val > PD_PE_TIMER_END_ID && val < PD_TIMER_NR)
+ tcpc_enable_timer(tcpc, val);
+ #else
+ if (val > 0 && val < PD_TIMER_NR)
+ tcpc_enable_timer(tcpc, val);
+ #endif /* CONFIG_USB_POWER_DELIVERY */
+ break;
+ #ifdef CONFIG_USB_POWER_DELIVERY
+ case TCPC_DESC_PD_TEST:
+ ret = get_parameters((char *)buf, &val, 1);
+ if (ret < 0) {
+ dev_err(dev, "get parameters fail\n");
+ return -EINVAL;
+ }
+ switch (val) {
+ case 1: /* Power Role Swap */
+ tcpm_power_role_swap(tcpc);
+ break;
+ case 2: /* Data Role Swap */
+ tcpm_data_role_swap(tcpc);
+ break;
+ case 3: /* Vconn Swap */
+ tcpm_vconn_swap(tcpc);
+ break;
+ case 4: /* Software Reset */
+ tcpm_soft_reset(tcpc);
+ break;
+ case 5: /* Hardware Reset */
+ tcpm_hard_reset(tcpc);
+ break;
+ case 6:
+ tcpm_get_source_cap(tcpc, &cap);
+ break;
+ case 7:
+ tcpm_get_sink_cap(tcpc, &cap);
+ break;
+ default:
+ break;
+ }
+ break;
+ #endif /* CONFIG_USB_POWER_DELIVERY */
+ default:
+ break;
+ }
+ return count;
+}
+
+static int tcpc_match_device_by_name(struct device *dev, const void *data)
+{
+ const char *name = data;
+ struct tcpc_device *tcpc = dev_get_drvdata(dev);
+
+ return strcmp(tcpc->desc.name, name) == 0;
+}
+
+struct tcpc_device *tcpc_dev_get_by_name(const char *name)
+{
+ struct device *dev = class_find_device(tcpc_class,
+ NULL, (const void *)name, tcpc_match_device_by_name);
+ return dev ? dev_get_drvdata(dev) : NULL;
+}
+
+static void tcpc_device_release(struct device *dev)
+{
+ struct tcpc_device *tcpc_dev = to_tcpc_device(dev);
+ char buf[1024] = { 0 };
+
+ pr_info("%s : %s device release\n", __func__, dev_name(dev));
+ if (!tcpc_dev)
+ snprintf(buf, sizeof(buf), "the tcpc device is NULL\n");
+ /* Un-init pe thread */
+#ifdef CONFIG_USB_POWER_DELIVERY
+ tcpci_event_deinit(tcpc_dev);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+ /* Un-init timer thread */
+ tcpci_timer_deinit(tcpc_dev);
+ /* Free the device structure; it was devm-allocated on the parent */
+ devm_kfree(dev->parent, tcpc_dev);
+}
+
+static int pd_dpm_wake_lock_call(struct notifier_block *dpm_nb,
+ unsigned long event, void *data)
+{
+ struct tcpc_device *tcpc = container_of(dpm_nb,
+ struct tcpc_device, dpm_nb);
+
+ switch (event) {
+ case PD_WAKE_LOCK:
+ __pm_stay_awake(&tcpc->attach_wake_lock);
+ break;
+ case PD_WAKE_UNLOCK:
+ __pm_relax(&tcpc->attach_wake_lock);
+ break;
+ default:
+ pr_info("%s unknown event (%lu)\n", __func__, event);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void tcpc_init_work(struct work_struct *work);
+
+struct tcpc_device *tcpc_device_register(struct device *parent,
+ struct tcpc_desc *tcpc_desc,
+ struct tcpc_ops *ops, void *drv_data)
+{
+ struct tcpc_device *tcpc;
+ int ret = 0;
+
+ pr_info("%s register tcpc device (%s)\n", __func__, tcpc_desc->name);
+ tcpc = devm_kzalloc(parent, sizeof(*tcpc), GFP_KERNEL);
+ if (!tcpc) {
+ pr_err("%s : allocate tcpc memory failed\n", __func__);
+ return NULL;
+ }
+
+ tcpc->dev.class = tcpc_class;
+ tcpc->dev.type = &tcpc_dev_type;
+ tcpc->dev.parent = parent;
+ tcpc->dev.release = tcpc_device_release;
+ dev_set_drvdata(&tcpc->dev, tcpc);
+ tcpc->drv_data = drv_data;
+ dev_set_name(&tcpc->dev, "%s", tcpc_desc->name);
+ tcpc->desc = *tcpc_desc;
+ tcpc->ops = ops;
+ tcpc->typec_local_rp_level = tcpc_desc->rp_lvl;
+
+ ret = device_register(&tcpc->dev);
+ if (ret) {
+ devm_kfree(parent, tcpc);
+ return ERR_PTR(ret);
+ }
+
+ srcu_init_notifier_head(&tcpc->evt_nh);
+ INIT_DELAYED_WORK(&tcpc->init_work, tcpc_init_work);
+
+ mutex_init(&tcpc->access_lock);
+ mutex_init(&tcpc->typec_lock);
+ mutex_init(&tcpc->timer_lock);
+ sema_init(&tcpc->timer_enable_mask_lock, 1);
+ sema_init(&tcpc->timer_tick_lock, 1);
+
+ /* If the system supports "WAKE_LOCK_IDLE", */
+ /* use it instead of "WAKE_LOCK_SUSPEND". */
+ wakeup_source_init(&tcpc->attach_wake_lock,
+ "tcpc_attach_wakelock");
+ wakeup_source_init(&tcpc->dettach_temp_wake_lock,
+ "tcpc_detach_wakelock");
+
+ tcpc->dpm_nb.notifier_call = pd_dpm_wake_lock_call;
+ ret = register_pd_wake_unlock_notifier(&tcpc->dpm_nb);
+ if (ret < 0) {
+ hisilog_err("%s register_pd_wake_unlock_notifier failed\n",
+ __func__);
+ } else {
+ hisilog_info("%s register_pd_wake_unlock_notifier OK\n",
+ __func__);
+ }
+
+ tcpci_timer_init(tcpc);
+#ifdef CONFIG_USB_POWER_DELIVERY
+ tcpci_event_init(tcpc);
+ pd_core_init(tcpc);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ return tcpc;
+}
+EXPORT_SYMBOL(tcpc_device_register);
+
+static int tcpc_device_irq_enable(struct tcpc_device *tcpc)
+{
+ int ret;
+
+ TCPC_DBG("%s\n", __func__);
+
+ if (!tcpc->ops->init) {
+ pr_err("%s Please implement the tcpc ops init function\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = tcpci_init(tcpc, false);
+ if (ret < 0) {
+ pr_err("%s tcpc init fail\n", __func__);
+ return ret;
+ }
+
+ tcpci_lock_typec(tcpc);
+ ret = tcpc_typec_init(tcpc, tcpc->desc.role_def + 1);
+ tcpci_unlock_typec(tcpc);
+
+ if (ret < 0) {
+ pr_err("%s : tcpc typec init fail\n", __func__);
+ return ret;
+ }
+
+ pr_info("%s : tcpc irq enable OK!\n", __func__);
+ return 0;
+}
+
+static int tcpc_dec_notifier_supply_num(struct tcpc_device *tcp_dev)
+{
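+ /*
+ * Each expected notifier consumer decrements notifier_supply_num
+ * when it registers; once the count reaches zero the delayed init
+ * work is cancelled and the TCPC interrupt path is enabled.
+ */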
+ if (tcp_dev->desc.notifier_supply_num == 0) {
+ pr_info("%s already started\n", __func__);
+ return 0;
+ }
+
+ tcp_dev->desc.notifier_supply_num--;
+ pr_info("%s supply_num = %d\n", __func__,
+ tcp_dev->desc.notifier_supply_num);
+
+ if (tcp_dev->desc.notifier_supply_num == 0) {
+ cancel_delayed_work(&tcp_dev->init_work);
+ tcpc_device_irq_enable(tcp_dev);
+ }
+
+ return 0;
+}
+
+struct tcpc_device *notify_tcp_dev_ready(const char *name)
+{
+ struct tcpc_device *tcpc = tcpc_dev_get_by_name(name);
+
+ if (!tcpc)
+ return NULL;
+
+ tcpc_dec_notifier_supply_num(tcpc);
+ return tcpc;
+}
+
+static void tcpc_init_work(struct work_struct *work)
+{
+ struct tcpc_device *tcpc = container_of(
+ work, struct tcpc_device, init_work.work);
+
+ if (tcpc->desc.notifier_supply_num == 0)
+ return;
+
+ pr_info("%s force start\n", __func__);
+
+ tcpc->desc.notifier_supply_num = 0;
+ tcpc_device_irq_enable(tcpc);
+}
+
+int tcpc_schedule_init_work(struct tcpc_device *tcpc)
+{
+ if (tcpc->desc.notifier_supply_num == 0)
+ return tcpc_device_irq_enable(tcpc);
+
+ pr_info("%s wait %d num\n", __func__, tcpc->desc.notifier_supply_num);
+
+ schedule_delayed_work(
+ &tcpc->init_work, msecs_to_jiffies(30 * 1000));
+ return 0;
+}
+EXPORT_SYMBOL(tcpc_schedule_init_work);
+
+int register_tcp_dev_notifier(struct tcpc_device *tcp_dev,
+ struct notifier_block *nb)
+{
+ int ret;
+
+ ret = srcu_notifier_chain_register(&tcp_dev->evt_nh, nb);
+ if (ret != 0)
+ return ret;
+
+ tcpc_dec_notifier_supply_num(tcp_dev);
+ return ret;
+}
+EXPORT_SYMBOL(register_tcp_dev_notifier);
+
+int unregister_tcp_dev_notifier(struct tcpc_device *tcp_dev,
+ struct notifier_block *nb)
+{
+ return srcu_notifier_chain_unregister(&tcp_dev->evt_nh, nb);
+}
+EXPORT_SYMBOL(unregister_tcp_dev_notifier);
+
+void tcpc_device_unregister(struct device *dev, struct tcpc_device *tcpc)
+{
+ if (!tcpc)
+ return;
+
+ tcpc_typec_deinit(tcpc);
+
+ wakeup_source_trash(&tcpc->dettach_temp_wake_lock);
+ wakeup_source_trash(&tcpc->attach_wake_lock);
+
+ device_unregister(&tcpc->dev);
+}
+EXPORT_SYMBOL(tcpc_device_unregister);
+
+void *tcpc_get_dev_data(struct tcpc_device *tcpc)
+{
+ return tcpc->drv_data;
+}
+EXPORT_SYMBOL(tcpc_get_dev_data);
+
+void tcpci_lock_typec(struct tcpc_device *tcpc)
+{
+ mutex_lock(&tcpc->typec_lock);
+}
+EXPORT_SYMBOL(tcpci_lock_typec);
+
+void tcpci_unlock_typec(struct tcpc_device *tcpc)
+{
+ mutex_unlock(&tcpc->typec_lock);
+}
+EXPORT_SYMBOL(tcpci_unlock_typec);
+
+static void tcpc_init_attrs(struct device_type *dev_type)
+{
+ int i;
+
+ dev_type->groups = tcpc_attr_groups;
+ for (i = 0; i < ARRAY_SIZE(tcpc_device_attributes); i++)
+ __tcpc_attrs[i] = &tcpc_device_attributes[i].attr;
+}
+
+static int __init tcpc_class_init(void)
+{
+ pr_info("%s_%s\n", __func__, TCPC_CORE_VERSION);
+
+ tcpc_class = class_create(THIS_MODULE, "hisi_pd");
+ if (IS_ERR(tcpc_class)) {
+ pr_err("Unable to create tcpc class; error = %ld\n",
+ PTR_ERR(tcpc_class));
+ return PTR_ERR(tcpc_class);
+ }
+ tcpc_init_attrs(&tcpc_dev_type);
+ tcpc_class->suspend = NULL;
+ tcpc_class->resume = NULL;
+
+ pr_info("TCPC class init OK\n");
+ return 0;
+}
+
+static void __exit tcpc_class_exit(void)
+{
+ class_destroy(tcpc_class);
+ pr_info("TCPC class un-init OK\n");
+}
+
+subsys_initcall(tcpc_class_init);
+module_exit(tcpc_class_exit);
+
+MODULE_DESCRIPTION("Richtek TypeC Port Control Core");
+MODULE_AUTHOR("Jeff Chang <jeff_chang@richtek.com>");
+MODULE_VERSION(TCPC_CORE_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/pd/richtek/tcpci_event.c b/drivers/usb/pd/richtek/tcpci_event.c
new file mode 100644
index 000000000000..f6cf5ae7ddfe
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpci_event.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * TCPC Interface for event handler
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kthread.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/version.h>
+#include <linux/sched/rt.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+#ifdef CONFIG_USB_PD_POSTPONE_VDM
+static void postpone_vdm_event(struct tcpc_device *tcpc_dev)
+{
+ /*
+ * Postpone the VDM retry event, since the retry may be
+ * interrupted by another PD event.
+ */
+
+ pd_event_t *vdm_event = &tcpc_dev->pd_vdm_event;
+
+ if (tcpc_dev->pd_pending_vdm_event && vdm_event->pd_msg) {
+ tcpc_dev->pd_postpone_vdm_timeout = false;
+ tcpc_restart_timer(tcpc_dev, PD_PE_VDM_POSTPONE);
+ }
+}
+#endif /* CONFIG_USB_PD_POSTPONE_VDM */
+
+pd_msg_t *__pd_alloc_msg(struct tcpc_device *tcpc_dev)
+{
+ int i;
+ u8 mask;
+ char buf[1024] = { 0 };
+
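+ /*
+ * pd_msg_buffer_allocated is a bitmap over pd_msg_buffer: scan for
+ * the first clear bit, claim it and hand out the matching slot.
+ */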
+ for (i = 0, mask = 1; i < PD_MSG_BUF_SIZE; i++, mask <<= 1) {
+ if ((mask & tcpc_dev->pd_msg_buffer_allocated) == 0) {
+ tcpc_dev->pd_msg_buffer_allocated |= mask;
+ return tcpc_dev->pd_msg_buffer + i;
+ }
+ }
+
+ PD_ERR("pd_alloc_msg failed\r\n");
+ snprintf(buf, sizeof(buf), "pd alloc msg failed\n");
+ return (pd_msg_t *)NULL;
+}
+
+pd_msg_t *pd_alloc_msg(struct tcpc_device *tcpc_dev)
+{
+ pd_msg_t *pd_msg = NULL;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ pd_msg = __pd_alloc_msg(tcpc_dev);
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ return pd_msg;
+}
+
+static void __pd_free_msg(struct tcpc_device *tcpc_dev, pd_msg_t *pd_msg)
+{
+ int index = pd_msg - tcpc_dev->pd_msg_buffer;
+ u8 mask = 1 << index;
+ char buf[1024] = { 0 };
+
+ if ((mask & tcpc_dev->pd_msg_buffer_allocated) == 0)
+ snprintf(buf, sizeof(buf), "pd free msg failed\n");
+ tcpc_dev->pd_msg_buffer_allocated &= (~mask);
+}
+
+static void __pd_free_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event)
+{
+ if (pd_event->pd_msg) {
+ __pd_free_msg(tcpc_dev, pd_event->pd_msg);
+ pd_event->pd_msg = NULL;
+ }
+}
+
+void pd_free_msg(struct tcpc_device *tcpc_dev, pd_msg_t *pd_msg)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ __pd_free_msg(tcpc_dev, pd_msg);
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_free_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ __pd_free_event(tcpc_dev, pd_event);
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static bool __pd_get_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event)
+{
+ int index = 0;
+
+ if (tcpc_dev->pd_event_count <= 0)
+ return false;
+
+ tcpc_dev->pd_event_count--;
+
+ *pd_event = tcpc_dev->pd_event_ring_buffer[
+ tcpc_dev->pd_event_head_index];
+
+ if (tcpc_dev->pd_event_count) {
+ index = tcpc_dev->pd_event_head_index + 1;
+ index %= PD_EVENT_BUF_SIZE;
+ }
+ tcpc_dev->pd_event_head_index = index;
+ return true;
+}
+
+bool pd_get_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event)
+{
+ bool ret;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ ret = __pd_get_event(tcpc_dev, pd_event);
+ mutex_unlock(&tcpc_dev->access_lock);
+ return ret;
+}
+
+static bool __pd_put_event(struct tcpc_device *tcpc_dev,
+ const pd_event_t *pd_event, bool from_port_partner)
+{
+ int index;
+
+#ifdef CONFIG_USB_PD_POSTPONE_OTHER_VDM
+ if (from_port_partner)
+ postpone_vdm_event(tcpc_dev);
+#endif
+
+ if (tcpc_dev->pd_event_count >= PD_EVENT_BUF_SIZE) {
+ PD_ERR("pd_put_event failed\r\n");
+ return false;
+ }
+
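+ /*
+ * Append at (head + count) modulo the ring size; __pd_get_event()
+ * consumes from the head and advances it.
+ */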
+ index = (tcpc_dev->pd_event_head_index + tcpc_dev->pd_event_count);
+ index %= PD_EVENT_BUF_SIZE;
+
+ tcpc_dev->pd_event_count++;
+ tcpc_dev->pd_event_ring_buffer[index] = *pd_event;
+
+ atomic_inc(&tcpc_dev->pending_event);
+ wake_up_interruptible(&tcpc_dev->event_loop_wait_que);
+ return true;
+}
+
+bool pd_put_event(struct tcpc_device *tcpc_dev, const pd_event_t *pd_event,
+ bool from_port_partner)
+{
+ bool ret;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ ret = __pd_put_event(tcpc_dev, pd_event, from_port_partner);
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+
+bool pd_get_vdm_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event)
+{
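+ /*
+ * A pending GoodCRC for a postponed VDM is delivered first as a
+ * synthetic PD_CTRL_GOOD_CRC event; the stored VDM event follows on
+ * the next call.
+ */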
+ pd_event_t delay_evt = {
+ .event_type = PD_EVT_CTRL_MSG,
+ .msg = PD_CTRL_GOOD_CRC,
+ .pd_msg = NULL,
+ };
+
+ pd_event_t *vdm_event = &tcpc_dev->pd_vdm_event;
+
+ if (tcpc_dev->pd_pending_vdm_event) {
+ if (vdm_event->pd_msg && !tcpc_dev->pd_postpone_vdm_timeout)
+ return false;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_pending_vdm_good_crc) {
+ *pd_event = delay_evt;
+ tcpc_dev->pd_pending_vdm_good_crc = false;
+ } else {
+ *pd_event = *vdm_event;
+ tcpc_dev->pd_pending_vdm_event = false;
+ }
+ mutex_unlock(&tcpc_dev->access_lock);
+ return true;
+ }
+
+ return false;
+}
+
+static inline void reset_pe_vdm_state(pd_port_t *pd_port, u32 vdm_hdr)
+{
+ if (PD_VDO_SVDM(vdm_hdr) && PD_VDO_CMDT(vdm_hdr) == CMDT_INIT) {
+ pd_port->reset_vdm_state = true;
+ pd_port->pe_vdm_state = pd_port->pe_pd_state;
+ }
+}
+
+bool pd_put_vdm_event(struct tcpc_device *tcpc_dev,
+ pd_event_t *pd_event, bool from_port_partner)
+{
+ char buf[1024] = { 0 };
+ pd_msg_t *pd_msg = pd_event->pd_msg;
+
+ mutex_lock(&tcpc_dev->access_lock);
+
+ if (tcpc_dev->pd_pending_vdm_event) {
+ /* If the message comes from the port partner, overwrite the pending event */
+
+ if (from_port_partner) {
+ if (pd_event_msg_match(&tcpc_dev->pd_vdm_event,
+ PD_EVT_CTRL_MSG,
+ PD_CTRL_GOOD_CRC)) {
+ TCPC_DBG("PostponeVDM GoodCRC\r\n");
+ tcpc_dev->pd_pending_vdm_good_crc = true;
+ }
+
+ __pd_free_event(tcpc_dev, &tcpc_dev->pd_vdm_event);
+ } else {
+ __pd_free_event(tcpc_dev, pd_event);
+ mutex_unlock(&tcpc_dev->access_lock);
+ return false;
+ }
+ }
+
+ tcpc_dev->pd_vdm_event = *pd_event;
+ tcpc_dev->pd_pending_vdm_event = true;
+ tcpc_dev->pd_postpone_vdm_timeout = true;
+
+ if (from_port_partner) {
+ if (!pd_msg)
+ snprintf(buf, sizeof(buf), "the pd_msg is NULL\n");
+ /* pd_msg->time_stamp = 0; */
+ tcpc_dev->pd_last_vdm_msg = *pd_msg;
+ reset_pe_vdm_state(&tcpc_dev->pd_port, pd_msg->payload[0]);
+#ifdef CONFIG_USB_PD_POSTPONE_FIRST_VDM
+ postpone_vdm_event(tcpc_dev);
+ mutex_unlock(&tcpc_dev->access_lock);
+ return true;
+#endif /* CONFIG_USB_PD_POSTPONE_FIRST_VDM */
+ }
+
+ atomic_inc(&tcpc_dev->pending_event); /* do not really wake up process*/
+ wake_up_interruptible(&tcpc_dev->event_loop_wait_que);
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ return true;
+}
+
+bool pd_put_last_vdm_event(struct tcpc_device *tcpc_dev)
+{
+ pd_msg_t *pd_msg = &tcpc_dev->pd_last_vdm_msg;
+ pd_event_t *vdm_event = &tcpc_dev->pd_vdm_event;
+
+ mutex_lock(&tcpc_dev->access_lock);
+
+ vdm_event->event_type = PD_EVT_HW_MSG;
+ vdm_event->msg = PD_HW_RETRY_VDM;
+
+ if (tcpc_dev->pd_pending_vdm_event)
+ __pd_free_event(tcpc_dev, &tcpc_dev->pd_vdm_event);
+
+ vdm_event->pd_msg = __pd_alloc_msg(tcpc_dev);
+
+ if (!vdm_event->pd_msg) {
+ mutex_unlock(&tcpc_dev->access_lock);
+ return false;
+ }
+
+ *vdm_event->pd_msg = *pd_msg;
+ tcpc_dev->pd_pending_vdm_event = true;
+ tcpc_dev->pd_postpone_vdm_timeout = true;
+
+#ifdef CONFIG_USB_PD_POSTPONE_RETRY_VDM
+ reset_pe_vdm_state(&tcpc_dev->pd_port, pd_msg->payload[0]);
+ postpone_vdm_event(tcpc_dev);
+#else
+ atomic_inc(&tcpc_dev->pending_event); /* do not really wake up process*/
+ wake_up_interruptible(&tcpc_dev->event_loop_wait_que);
+#endif /* CONFIG_USB_PD_POSTPONE_RETRY_VDM */
+
+ mutex_unlock(&tcpc_dev->access_lock);
+ return true;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static void __pd_event_buf_reset(struct tcpc_device *tcpc_dev)
+{
+ pd_event_t pd_event;
+
+ tcpc_dev->pd_hard_reset_event_pending = false;
+ while (__pd_get_event(tcpc_dev, &pd_event))
+ __pd_free_event(tcpc_dev, &pd_event);
+
+ if (tcpc_dev->pd_pending_vdm_event) {
+ __pd_free_event(tcpc_dev, &tcpc_dev->pd_vdm_event);
+ tcpc_dev->pd_pending_vdm_event = false;
+ }
+
+ tcpc_dev->pd_pending_vdm_good_crc = false;
+}
+
+void pd_event_buf_reset(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ __pd_event_buf_reset(tcpc_dev);
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static inline bool __pd_put_hw_event(
+ struct tcpc_device *tcpc_dev, u8 hw_event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_HW_MSG,
+ .msg = hw_event,
+ .pd_msg = NULL,
+ };
+
+ return __pd_put_event(tcpc_dev, &evt, false);
+}
+
+static inline bool __pd_put_pe_event(
+ struct tcpc_device *tcpc_dev, u8 pe_event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_PE_MSG,
+ .msg = pe_event,
+ .pd_msg = NULL,
+ };
+
+ return __pd_put_event(tcpc_dev, &evt, false);
+}
+
+void pd_put_cc_detached_event(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+
+ __pd_event_buf_reset(tcpc_dev);
+ __pd_put_hw_event(tcpc_dev, PD_HW_CC_DETACHED);
+
+ tcpc_dev->pd_wait_pe_idle = true;
+ tcpc_dev->pd_wait_pr_swap_complete = false;
+ tcpc_dev->pd_wait_hard_reset_complete = false;
+ tcpc_dev->pd_hard_reset_event_pending = false;
+ tcpc_dev->pd_wait_vbus_once = PD_WAIT_VBUS_DISABLE;
+ tcpc_dev->pd_bist_mode = PD_BIST_MODE_DISABLE;
+ tcpc_dev->pd_ping_event_pending = false;
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ tcpc_dev->pd_discard_pending = false;
+#endif
+
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_put_recv_hard_reset_event(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_HARD_RESET;
+
+ if ((!tcpc_dev->pd_hard_reset_event_pending) &&
+ (!tcpc_dev->pd_wait_pe_idle)) {
+ __pd_event_buf_reset(tcpc_dev);
+ __pd_put_hw_event(tcpc_dev, PD_HW_RECV_HARD_RESET);
+ tcpc_dev->pd_bist_mode = PD_BIST_MODE_DISABLE;
+ tcpc_dev->pd_hard_reset_event_pending = true;
+ tcpc_dev->pd_ping_event_pending = false;
+ }
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ tcpc_dev->pd_discard_pending = false;
+#endif
+
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_put_sent_hard_reset_event(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_wait_hard_reset_complete) {
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_GOOD_CRC;
+ __pd_event_buf_reset(tcpc_dev);
+ __pd_put_pe_event(tcpc_dev, PD_PE_HARD_RESET_COMPLETED);
+ } else {
+ TCPC_DBG("[HReset] Unattached\r\n");
+ }
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+bool pd_put_pd_msg_event(struct tcpc_device *tcpc_dev, pd_msg_t *pd_msg)
+{
+ u32 cnt, cmd;
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ bool discard_pending = false;
+#endif
+
+ pd_event_t evt = {
+ .event_type = PD_EVT_PD_MSG,
+ .pd_msg = pd_msg,
+ };
+
+ cnt = PD_HEADER_CNT(pd_msg->msg_hdr);
+ cmd = PD_HEADER_TYPE(pd_msg->msg_hdr);
+
+ /* bist mode */
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_bist_mode != PD_BIST_MODE_DISABLE) {
+ TCPC_DBG("BIST_MODE_RX\r\n");
+ __pd_free_event(tcpc_dev, &evt);
+ mutex_unlock(&tcpc_dev->access_lock);
+ return 0;
+ }
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ if (tcpc_dev->pd_discard_pending &&
+ (pd_msg->frame_type == TCPC_TX_SOP) &&
+ (tcpc_dev->tcpc_flags & TCPC_FLAGS_RETRY_CRC_DISCARD)) {
+ discard_pending = true;
+ tcpc_dev->pd_discard_pending = false;
+
+ if ((cmd == PD_CTRL_GOOD_CRC) && (cnt == 0)) {
+ TCPC_DBG("RETRANSMIT\r\n");
+ __pd_free_event(tcpc_dev, &evt);
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ /* TODO: check it later */
+ tcpc_disable_timer(tcpc_dev, PD_TIMER_DISCARD);
+ tcpci_retransmit(tcpc_dev);
+ return 0;
+ }
+ }
+#endif
+
+#ifdef CONFIG_USB_PD_DROP_REPEAT_PING
+ if (cnt == 0 && cmd == PD_CTRL_PING) {
+ if (tcpc_dev->pd_ping_event_pending) {
+ TCPC_DBG("PING\r\n");
+ __pd_free_event(tcpc_dev, &evt);
+ mutex_unlock(&tcpc_dev->access_lock);
+ return 0;
+ }
+
+ tcpc_dev->pd_ping_event_pending = true;
+ }
+#endif
+
+ if (cnt != 0 && cmd == PD_DATA_BIST)
+ tcpc_dev->pd_bist_mode = PD_BIST_MODE_EVENT_PENDING;
+
+ mutex_unlock(&tcpc_dev->access_lock);
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ if (discard_pending) {
+ tcpc_disable_timer(tcpc_dev, PD_TIMER_DISCARD);
+ pd_put_hw_event(tcpc_dev, PD_HW_TX_FAILED);
+ }
+#endif
+
+ if (cnt != 0 && cmd == PD_DATA_VENDOR_DEF)
+ return pd_put_vdm_event(tcpc_dev, &evt, true);
+ return pd_put_event(tcpc_dev, &evt, true);
+}
+
+static void pd_report_vbus_present(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->pd_wait_vbus_once = PD_WAIT_VBUS_DISABLE;
+ __pd_put_hw_event(tcpc_dev, PD_HW_VBUS_PRESENT);
+}
+
+void pd_put_vbus_changed_event(struct tcpc_device *tcpc_dev, bool from_ic)
+{
+ int vbus_valid;
+ bool postpone_vbus_present = false;
+
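+ /*
+ * With CONFIG_USB_PD_VBUS_PRESENT_TOUT set, a VBUS-present report
+ * coming from the IC is deferred to PD_TIMER_VBUS_PRESENT instead of
+ * being delivered immediately below.
+ */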
+ mutex_lock(&tcpc_dev->access_lock);
+ vbus_valid = tcpci_check_vbus_valid(tcpc_dev);
+
+ switch (tcpc_dev->pd_wait_vbus_once) {
+ case PD_WAIT_VBUS_VALID_ONCE:
+ if (vbus_valid) {
+#if CONFIG_USB_PD_VBUS_PRESENT_TOUT
+ postpone_vbus_present = from_ic;
+#endif /* CONFIG_USB_PD_VBUS_PRESENT_TOUT */
+ if (!postpone_vbus_present)
+ pd_report_vbus_present(tcpc_dev);
+ }
+ break;
+
+ case PD_WAIT_VBUS_INVALID_ONCE:
+ if (!vbus_valid) {
+ tcpc_dev->pd_wait_vbus_once = PD_WAIT_VBUS_DISABLE;
+ __pd_put_hw_event(tcpc_dev, PD_HW_VBUS_ABSENT);
+ }
+ break;
+ }
+ mutex_unlock(&tcpc_dev->access_lock);
+
+#if CONFIG_USB_PD_VBUS_PRESENT_TOUT
+ if (postpone_vbus_present)
+ tcpc_enable_timer(tcpc_dev, PD_TIMER_VBUS_PRESENT);
+#endif /* CONFIG_USB_PD_VBUS_PRESENT_TOUT */
+}
+
+void pd_put_vbus_safe0v_event(struct tcpc_device *tcpc_dev)
+{
+#ifdef CONFIG_USB_PD_SAFE0V_TIMEOUT
+ tcpc_disable_timer(tcpc_dev, PD_TIMER_VSAFE0V_TOUT);
+#endif /* CONFIG_USB_PD_SAFE0V_TIMEOUT */
+
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_wait_vbus_once == PD_WAIT_VBUS_SAFE0V_ONCE) {
+ tcpc_dev->pd_wait_vbus_once = PD_WAIT_VBUS_DISABLE;
+ __pd_put_hw_event(tcpc_dev, PD_HW_VBUS_SAFE0V);
+ }
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_put_vbus_stable_event(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_wait_vbus_once == PD_WAIT_VBUS_STABLE_ONCE) {
+ tcpc_dev->pd_wait_vbus_once = PD_WAIT_VBUS_DISABLE;
+ __pd_put_hw_event(tcpc_dev, PD_HW_VBUS_STABLE);
+ }
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_put_vbus_present_event(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->access_lock);
+ pd_report_vbus_present(tcpc_dev);
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+/* ---- PD Notify TCPC ---- */
+
+void pd_try_put_pe_idle_event(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_transmit_state <= PD_TX_STATE_WAIT)
+ __pd_put_pe_event(tcpc_dev, PD_PE_IDLE);
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_idle(pd_port_t *pd_port)
+{
+ bool notify_pe_idle = false;
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ if (tcpc_dev->pd_wait_pe_idle) {
+ notify_pe_idle = true;
+ tcpc_dev->pd_wait_pe_idle = false;
+ }
+
+ tcpc_dev->pd_wait_error_recovery = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ pd_update_connect_state(pd_port, PD_CONNECT_NONE);
+
+ if (notify_pe_idle)
+ tcpc_enable_timer(tcpc_dev, TYPEC_RT_TIMER_PE_IDLE);
+}
+
+void pd_notify_pe_wait_vbus_once(pd_port_t *pd_port, int wait_evt)
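+ /*
+ * Split buf on spaces; a token whose second character is 'x'/'X'
+ * is parsed as hex, anything else as decimal.
+ */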
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_vbus_once = wait_evt;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ switch (wait_evt) {
+ case PD_WAIT_VBUS_VALID_ONCE:
+ case PD_WAIT_VBUS_INVALID_ONCE:
+ pd_put_vbus_changed_event(tcpc_dev, false);
+ break;
+ case PD_WAIT_VBUS_SAFE0V_ONCE:
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT
+ if (tcpci_check_vsafe0v(tcpc_dev, true)) {
+ pd_put_vbus_safe0v_event(tcpc_dev);
+ break;
+ }
+#else
+ pd_enable_timer(pd_port, PD_TIMER_VSAFE0V_DELAY);
+#endif /* CONFIG_TCPC_VSAFE0V_DETECT */
+
+#ifdef CONFIG_USB_PD_SAFE0V_TIMEOUT
+ pd_enable_timer(pd_port, PD_TIMER_VSAFE0V_TOUT);
+#endif /* CONFIG_USB_PD_SAFE0V_TIMEOUT */
+ break;
+ }
+}
+
+void pd_notify_pe_error_recovery(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_hard_reset_complete = false;
+ tcpc_dev->pd_wait_pr_swap_complete = false;
+ tcpc_dev->pd_wait_error_recovery = true;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ tcpci_set_cc(pd_port->tcpc_dev, TYPEC_CC_OPEN);
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_ERROR_RECOVERY);
+}
+
+void pd_notify_pe_transit_to_default(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_hard_reset_event_pending = false;
+ tcpc_dev->pd_wait_hard_reset_complete = true;
+ tcpc_dev->pd_wait_pr_swap_complete = false;
+ tcpc_dev->pd_bist_mode = PD_BIST_MODE_DISABLE;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_hard_reset_completed(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ if (!tcpc_dev->pd_wait_hard_reset_complete)
+ return;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_hard_reset_complete = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_send_hard_reset(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_transmit_state = PD_TX_STATE_WAIT_HARD_RESET;
+ tcpc_dev->pd_wait_hard_reset_complete = true;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_execute_pr_swap(pd_port_t *pd_port, bool start_swap)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ pd_port->during_swap = start_swap;
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_pr_swap_complete = true;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_cancel_pr_swap(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ if (!tcpc_dev->pd_wait_pr_swap_complete)
+ return;
+
+ pd_port->during_swap = false;
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_pr_swap_complete = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+}
+
+void pd_notify_pe_reset_protocol(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_pr_swap_complete = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_noitfy_pe_bist_mode(pd_port_t *pd_port, u8 mode)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_bist_mode = mode;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_recv_ping_event(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_ping_event_pending = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_transmit_msg(
+ pd_port_t *pd_port, u8 type)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_transmit_state = type;
+ mutex_unlock(&tcpc_dev->access_lock);
+}
+
+void pd_notify_pe_pr_changed(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ /* Revisit the locking later; in practice the typec layer
+ * ignores all CC changes during PR_SWAP.
+ */
+
+ /* mutex_lock(&tcpc_dev->access_lock); */
+ tcpc_typec_handle_pe_pr_swap(tcpc_dev);
+ /* mutex_unlock(&tcpc_dev->access_lock); */
+}
+
+void pd_notify_pe_src_explicit_contract(pd_port_t *pd_port)
+{
+ struct tcpc_device *tcpc_dev = pd_port->tcpc_dev;
+
+ if (pd_port->explicit_contract)
+ return;
+
+ /*mutex_lock(&tcpc_dev->access_lock); */
+ tcpc_typec_advertise_explicit_contract(tcpc_dev);
+ /*mutex_unlock(&tcpc_dev->access_lock); */
+}
+
+/* ---- init ---- */
+static int tcpc_event_thread(void *param)
+{
+ struct tcpc_device *tcpc_dev = param;
+ struct sched_param sch_param = {.sched_priority = MAX_RT_PRIO - 2};
+
+ sched_setscheduler(current, SCHED_FIFO, &sch_param);
+
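+ /*
+ * Sleep until a PD event is queued or a stop is requested, then
+ * drain the queue by running the policy engine until it reports
+ * no further work.
+ */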
+ while (true) {
+ wait_event_interruptible(
+ tcpc_dev->event_loop_wait_que,
+ atomic_read(&tcpc_dev->pending_event) ||
+ tcpc_dev->event_loop_thead_stop);
+ if (kthread_should_stop() || tcpc_dev->event_loop_thead_stop)
+ break;
+ do {
+ atomic_dec_if_positive(&tcpc_dev->pending_event);
+ } while (pd_policy_engine_run(tcpc_dev));
+ }
+
+ return 0;
+}
+
+int tcpci_event_init(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->event_task = kthread_create(tcpc_event_thread, tcpc_dev,
+ "tcpc_event_%s.%p", dev_name(&tcpc_dev->dev), tcpc_dev);
+ tcpc_dev->event_loop_thead_stop = false;
+
+ init_waitqueue_head(&tcpc_dev->event_loop_wait_que);
+ atomic_set(&tcpc_dev->pending_event, 0);
+ wake_up_process(tcpc_dev->event_task);
+
+ return 0;
+}
+
+int tcpci_event_deinit(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->event_loop_thead_stop = true;
+ wake_up_interruptible(&tcpc_dev->event_loop_wait_que);
+ kthread_stop(tcpc_dev->event_task);
+ return 0;
+}
diff --git a/drivers/usb/pd/richtek/tcpci_timer.c b/drivers/usb/pd/richtek/tcpci_timer.c
new file mode 100644
index 000000000000..521e7ab29b47
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpci_timer.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * TCPC Interface for timer handler
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/kthread.h>
+#include <linux/hrtimer.h>
+#include <linux/version.h>
+#include <linux/sched/rt.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_timer.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/rt1711h.h>
+
+#define RT_MASK64(i) (((u64)1) << i)
+
+#define TIMEOUT_VAL(val) ((val) * 1000)
+#define TIMEOUT_RANGE(min, max) (((min) * 4000 + (max) * 1000) / 5)
+#define TIMEOUT_VAL_US(val) (val)
+
+/* Debug message Macro */
+#if TCPC_TIMER_DBG_EN
+#define TCPC_TIMER_DBG(tcpc, id) \
+{ \
+ RT_DBG_INFO("Trigger %s\n", tcpc_timer_name[id]); \
+}
+#else
+#define TCPC_TIMER_DBG(format, args...)
+#endif /* TCPC_TIMER_DBG_EN */
+
+#if TCPC_TIMER_INFO_EN
+#define TCPC_TIMER_EN_DBG(tcpc, id) \
+{ \
+ RT_DBG_INFO("Enable %s\n", tcpc_timer_name[id]); \
+}
+#else
+#define TCPC_TIMER_EN_DBG(format, args...)
+#endif /* TCPC_TIMER_INFO_EN */
+
+static inline u64 rt_get_value(u64 *p)
+{
+ unsigned long flags;
+ u64 data;
+
+ raw_local_irq_save(flags);
+ data = *p;
+ raw_local_irq_restore(flags);
+ return data;
+}
+
+static inline void rt_set_value(u64 *p, u64 data)
+{
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ *p = data;
+ raw_local_irq_restore(flags);
+}
+
+static inline void rt_clear_bit(int nr, u64 *addr)
+{
+ u64 mask = ((u64)1) << nr;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ *addr &= ~mask;
+ raw_local_irq_restore(flags);
+}
+
+static inline void rt_set_bit(int nr, u64 *addr)
+{
+ u64 mask = ((u64)1) << nr;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ *addr |= mask;
+ raw_local_irq_restore(flags);
+}
+
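+/*
+ * Indexed by timer ID: the ordering must match the timer enum and the
+ * tcpc_timer_timeout[] table below.
+ */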
+const char *tcpc_timer_name[] = {
+#ifdef CONFIG_USB_POWER_DELIVERY
+ "PD_TIMER_BIST_CONT_MODE",
+ "PD_TIMER_DISCOVER_ID",
+ "PD_TIMER_HARD_RESET_COMPLETE",
+ "PD_TIMER_NO_RESPONSE",
+ "PD_TIMER_PS_HARD_RESET",
+ "PD_TIMER_PS_SOURCE_OFF",
+ "PD_TIMER_PS_SOURCE_ON",
+ "PD_TIMER_PS_TRANSITION",
+ "PD_TIMER_SENDER_RESPONSE",
+ "PD_TIMER_SINK_ACTIVITY",
+ "PD_TIMER_SINK_REQUEST",
+ "PD_TIMER_SINK_WAIT_CAP",
+ "PD_TIMER_SOURCE_ACTIVITY",
+ "PD_TIMER_SOURCE_CAPABILITY",
+ "PD_TIMER_SOURCE_START",
+ "PD_TIMER_VCONN_ON",
+ "PD_TIMER_VDM_MODE_ENTRY",
+ "PD_TIMER_VDM_MODE_EXIT",
+ "PD_TIMER_VDM_RESPONSE",
+ "PD_TIMER_SOURCE_TRANSITION",
+ "PD_TIMER_SRC_RECOVER",
+ "PD_TIMER_VSAFE0V_DELAY",
+ "PD_TIMER_VSAFE0V_TOUT",
+ "PD_TIMER_DISCARD",
+ "PD_TIMER_VBUS_STABLE",
+ "PD_TIMER_VBUS_PRESENT",
+ "PD_PE_VDM_POSTPONE",
+
+ "TYPEC_RT_TIMER_PE_IDLE",
+ "TYPEC_RT_TIMER_SAFE0V_DELAY",
+ "TYPEC_RT_TIMER_SAFE0V_TOUT",
+
+ "TYPEC_TRY_TIMER_DRP_TRY",
+ "TYPEC_TRY_TIMER_DRP_TRYWAIT",
+
+ "TYPEC_TIMER_CCDEBOUNCE",
+ "TYPEC_TIMER_PDDEBOUNCE",
+ "TYPEC_TIMER_ERROR_RECOVERY",
+ "TYPEC_TIMER_WAKEUP_TOUT",
+ "TYPEC_TIMER_DRP_SRC_TOGGLE",
+#else
+ "TYPEC_RT_TIMER_SAFE0V_DELAY",
+ "TYPEC_RT_TIMER_SAFE0V_TOUT",
+
+ "TYPEC_TRY_TIMER_DRP_TRY",
+ "TYPEC_TRY_TIMER_DRP_TRYWAIT",
+
+ "TYPEC_TIMER_CCDEBOUNCE",
+ "TYPEC_TIMER_PDDEBOUNCE",
+ "TYPEC_TIMER_WAKEUP_TOUT",
+ "TYPEC_TIMER_DRP_SRC_TOGGLE",
+#endif /* CONFIG_USB_POWER_DELIVERY */
+};
+
+#define PD_TIMER_VSAFE0V_DLY_TOUT TIMEOUT_VAL(400)
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT
+#define TYPEC_RT_TIMER_SAFE0V_DLY_TOUT TIMEOUT_VAL(35)
+#else
+#define TYPEC_RT_TIMER_SAFE0V_DLY_TOUT TIMEOUT_VAL(100)
+#endif
+
+static const u32 tcpc_timer_timeout[PD_TIMER_NR] = {
+#ifdef CONFIG_USB_POWER_DELIVERY
+ TIMEOUT_RANGE(30, 60), /* PD_TIMER_BIST_CONT_MODE */
+ TIMEOUT_RANGE(40, 50), /* PD_TIMER_DISCOVER_ID */
+ TIMEOUT_RANGE(4, 5), /* PD_TIMER_HARD_RESET_COMPLETE (not used) */
+ TIMEOUT_RANGE(4500, 5500), /* PD_TIMER_NO_RESPONSE */
+ TIMEOUT_RANGE(25, 35), /* PD_TIMER_PS_HARD_RESET */
+ TIMEOUT_RANGE(750, 920), /* PD_TIMER_PS_SOURCE_OFF */
+ TIMEOUT_RANGE(390, 480), /* PD_TIMER_PS_SOURCE_ON, */
+ TIMEOUT_RANGE(450, 550), /* PD_TIMER_PS_TRANSITION */
+ TIMEOUT_RANGE(24, 30), /* PD_TIMER_SENDER_RESPONSE */
+ TIMEOUT_RANGE(120, 150), /* PD_TIMER_SINK_ACTIVITY (not used) */
+ TIMEOUT_RANGE(100, 100), /* PD_TIMER_SINK_REQUEST */
+ TIMEOUT_RANGE(310, 620), /* PD_TIMER_SINK_WAIT_CAP */
+ TIMEOUT_RANGE(40, 50), /* PD_TIMER_SOURCE_ACTIVITY (not used) */
+ TIMEOUT_RANGE(100, 200), /* PD_TIMER_SOURCE_CAPABILITY */
+ TIMEOUT_VAL(20), /* PD_TIMER_SOURCE_START */
+ TIMEOUT_VAL(100), /* PD_TIMER_VCONN_ON */
+ TIMEOUT_RANGE(40, 50), /* PD_TIMER_VDM_MODE_ENTRY */
+ TIMEOUT_RANGE(40, 50), /* PD_TIMER_VDM_MODE_EXIT */
+ TIMEOUT_RANGE(24, 30), /* PD_TIMER_VDM_RESPONSE */
+ TIMEOUT_RANGE(25, 35), /* PD_TIMER_SOURCE_TRANSITION */
+ TIMEOUT_RANGE(660, 1000), /* PD_TIMER_SRC_RECOVER */
+
+ /* PD_TIMER (out of spec) */
+ PD_TIMER_VSAFE0V_DLY_TOUT, /* PD_TIMER_VSAFE0V_DELAY */
+ TIMEOUT_VAL(650), /* PD_TIMER_VSAFE0V_TOUT */
+ TIMEOUT_VAL(3), /* PD_TIMER_DISCARD */
+ /* PD_TIMER_VBUS_STABLE */
+ TIMEOUT_VAL(CONFIG_USB_PD_VBUS_STABLE_TOUT),
+ /* PD_TIMER_VBUS_PRESENT */
+ TIMEOUT_VAL(CONFIG_USB_PD_VBUS_PRESENT_TOUT),
+ TIMEOUT_VAL_US(3500), /* PD_PE_VDM_POSTPONE */
+
+ /* TYPEC-RT-TIMER */
+ TIMEOUT_VAL(1), /* TYPEC_RT_TIMER_PE_IDLE */
+ TYPEC_RT_TIMER_SAFE0V_DLY_TOUT, /* TYPEC_RT_TIMER_SAFE0V_DELAY */
+ TIMEOUT_VAL(650), /* TYPEC_RT_TIMER_SAFE0V_TOUT */
+
+ /* TYPEC-TRY-TIMER */
+ TIMEOUT_RANGE(75, 150), /* TYPEC_TRY_TIMER_DRP_TRY */
+ TIMEOUT_RANGE(400, 800), /* TYPEC_TRY_TIMER_DRP_TRYWAIT */
+
+ /* TYPEC-DEBOUNCE-TIMER */
+ TIMEOUT_RANGE(100, 200), /* TYPEC_TIMER_CCDEBOUNCE */
+ TIMEOUT_RANGE(10, 10), /* TYPEC_TIMER_PDDEBOUNCE */
+ TIMEOUT_RANGE(25, 25), /* TYPEC_TIMER_ERROR_RECOVERY */
+ /* TYPEC_TIMER_WAKEUP_TOUT (out of spec) */
+ TIMEOUT_VAL(300 * 1000),
+ TIMEOUT_VAL(60), /* TYPEC_TIMER_DRP_SRC_TOGGLE */
+#else
+ /* TYPEC-RT-TIMER */
+ TYPEC_RT_TIMER_SAFE0V_DLY_TOUT, /* TYPEC_RT_TIMER_SAFE0V_DELAY */
+ TIMEOUT_VAL(650), /* TYPEC_RT_TIMER_SAFE0V_TOUT */
+
+ /* TYPEC-TRY-TIMER */
+ TIMEOUT_RANGE(75, 150), /* TYPEC_TRY_TIMER_DRP_TRY */
+ TIMEOUT_RANGE(400, 800), /* TYPEC_TRY_TIMER_DRP_TRYWAIT */
+
+ TIMEOUT_RANGE(100, 200), /* TYPEC_TIMER_CCDEBOUNCE */
+ TIMEOUT_RANGE(10, 10), /* TYPEC_TIMER_PDDEBOUNCE */
+ TYPEC_TIMER_SAFE0V_TOUT, /* TYPEC_TIMER_SAFE0V (out of spec) */
+ /* TYPEC_TIMER_WAKEUP_TOUT (out of spec) */
+ TIMEOUT_VAL(300 * 1000),
+ TIMEOUT_VAL(60), /* TYPEC_TIMER_DRP_SRC_TOGGLE */
+#endif /* CONFIG_USB_POWER_DELIVERY */
+};
+
+typedef enum hrtimer_restart (*tcpc_hrtimer_call)(struct hrtimer *timer);
+
+static inline void on_pe_timer_timeout(
+ struct tcpc_device *tcpc_dev, u32 timer_id)
+{
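+ /*
+ * Translate an expired PD timer into the corresponding PD event (or
+ * direct action) and finally disarm the timer.
+ */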
+#ifdef CONFIG_USB_POWER_DELIVERY
+ pd_event_t pd_event;
+
+ pd_event.event_type = PD_EVT_TIMER_MSG;
+ pd_event.msg = timer_id;
+ pd_event.pd_msg = NULL;
+
+ switch (timer_id) {
+ case PD_TIMER_VDM_MODE_ENTRY:
+ case PD_TIMER_VDM_MODE_EXIT:
+ case PD_TIMER_VDM_RESPONSE:
+ pd_put_vdm_event(tcpc_dev, &pd_event, false);
+ break;
+
+ case PD_TIMER_VSAFE0V_DELAY:
+ pd_put_vbus_safe0v_event(tcpc_dev);
+ break;
+
+#ifdef CONFIG_USB_PD_SAFE0V_TIMEOUT
+ case PD_TIMER_VSAFE0V_TOUT:
+ {
+ u16 power_status = 0;
+ int vbus_level = tcpc_dev->vbus_level;
+
+ tcpci_get_power_status(tcpc_dev, &power_status);
+ tcpci_vbus_level_init(tcpc_dev, power_status);
+
+ TCPC_INFO("VSafe0V TOUT: %d - %d\r\n",
+ tcpc_dev->vbus_level, vbus_level);
+ }
+ pd_put_vbus_safe0v_event(tcpc_dev);
+ break;
+#endif /* CONFIG_USB_PD_SAFE0V_TIMEOUT */
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ case PD_TIMER_DISCARD:
+ tcpc_dev->pd_discard_pending = false;
+ pd_put_hw_event(tcpc_dev, PD_HW_TX_FAILED);
+ break;
+#endif /* CONFIG_USB_PD_RETRY_CRC_DISCARD */
+
+#if CONFIG_USB_PD_VBUS_STABLE_TOUT
+ case PD_TIMER_VBUS_STABLE:
+ pd_put_vbus_stable_event(tcpc_dev);
+ break;
+#endif /* CONFIG_USB_PD_VBUS_STABLE_TOUT */
+
+#if CONFIG_USB_PD_VBUS_PRESENT_TOUT
+ case PD_TIMER_VBUS_PRESENT:
+ pd_put_vbus_present_event(tcpc_dev);
+ break;
+#endif /* CONFIG_USB_PD_VBUS_PRESENT_TOUT */
+
+ case PD_PE_VDM_POSTPONE:
+ tcpc_dev->pd_postpone_vdm_timeout = true;
+ atomic_inc(&tcpc_dev->pending_event);
+ wake_up_interruptible(&tcpc_dev->event_loop_wait_que);
+ break;
+
+ default:
+ pd_put_event(tcpc_dev, &pd_event, false);
+ break;
+ }
+#endif
+
+ tcpc_disable_timer(tcpc_dev, timer_id);
+}
+
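+/*
+ * Record the expired timer in timer_tick and wake the waiter on
+ * timer_wait_que; relies on 'index' and 'tcpc_dev' being defined in
+ * the enclosing hrtimer callback.
+ */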
+#define TCPC_TIMER_TRIGGER() do \
+{ \
+ down(&tcpc_dev->timer_tick_lock); \
+ rt_set_bit(index, (u64 *)&tcpc_dev->timer_tick); \
+ up(&tcpc_dev->timer_tick_lock); \
+ wake_up_interruptible(&tcpc_dev->timer_wait_que); \
+} while (0)
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+static enum hrtimer_restart tcpc_timer_bist_cont_mode(struct hrtimer *timer)
+{
+ int index = PD_TIMER_BIST_CONT_MODE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_discover_id(struct hrtimer *timer)
+{
+ int index = PD_TIMER_DISCOVER_ID;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_hard_reset_complete(
+ struct hrtimer *timer)
+{
+ int index = PD_TIMER_HARD_RESET_COMPLETE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_no_response(struct hrtimer *timer)
+{
+ int index = PD_TIMER_NO_RESPONSE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_ps_hard_reset(struct hrtimer *timer)
+{
+ int index = PD_TIMER_PS_HARD_RESET;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_ps_source_off(struct hrtimer *timer)
+{
+ int index = PD_TIMER_PS_SOURCE_OFF;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_ps_source_on(struct hrtimer *timer)
+{
+ int index = PD_TIMER_PS_SOURCE_ON;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_ps_transition(struct hrtimer *timer)
+{
+ int index = PD_TIMER_PS_TRANSITION;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_sender_response(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SENDER_RESPONSE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_sink_activity(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SINK_ACTIVITY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_sink_request(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SINK_REQUEST;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_sink_wait_cap(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SINK_WAIT_CAP;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_source_activity(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SOURCE_ACTIVITY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_source_capability(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SOURCE_CAPABILITY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_source_start(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SOURCE_START;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vconn_on(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VCONN_ON;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vdm_mode_entry(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VDM_MODE_ENTRY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vdm_mode_exit(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VDM_MODE_EXIT;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vdm_response(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VDM_RESPONSE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_source_transition(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SOURCE_TRANSITION;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_src_recover(struct hrtimer *timer)
+{
+ int index = PD_TIMER_SRC_RECOVER;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vsafe0v_delay(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VSAFE0V_DELAY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vsafe0v_tout(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VSAFE0V_TOUT;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_error_recovery(struct hrtimer *timer)
+{
+ int index = TYPEC_TIMER_ERROR_RECOVERY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_pd_discard(struct hrtimer *timer)
+{
+ int index = PD_TIMER_DISCARD;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vbus_stable(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VBUS_STABLE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_vbus_present(struct hrtimer *timer)
+{
+ int index = PD_TIMER_VBUS_PRESENT;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart pd_pe_vdm_postpone_timeout(struct hrtimer *timer)
+{
+ int index = PD_PE_VDM_POSTPONE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_rt_pe_idle(struct hrtimer *timer)
+{
+ int index = TYPEC_RT_TIMER_PE_IDLE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+static enum hrtimer_restart tcpc_timer_rt_vsafe0v_delay(struct hrtimer *timer)
+{
+ int index = TYPEC_RT_TIMER_SAFE0V_DELAY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_rt_vsafe0v_tout(struct hrtimer *timer)
+{
+ int index = TYPEC_RT_TIMER_SAFE0V_TOUT;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_try_drp_try(struct hrtimer *timer)
+{
+ int index = TYPEC_TRY_TIMER_DRP_TRY;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_try_drp_trywait(struct hrtimer *timer)
+{
+ int index = TYPEC_TRY_TIMER_DRP_TRYWAIT;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_ccdebounce(struct hrtimer *timer)
+{
+ int index = TYPEC_TIMER_CCDEBOUNCE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_pddebounce(struct hrtimer *timer)
+{
+ int index = TYPEC_TIMER_PDDEBOUNCE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_wakeup(struct hrtimer *timer)
+{
+ int index = TYPEC_TIMER_WAKEUP;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart tcpc_timer_drp_src_toggle(struct hrtimer *timer)
+{
+ int index = TYPEC_TIMER_DRP_SRC_TOGGLE;
+ struct tcpc_device *tcpc_dev =
+ container_of(timer, struct tcpc_device, tcpc_timer[index]);
+
+ TCPC_TIMER_TRIGGER();
+ return HRTIMER_NORESTART;
+}
+
+static tcpc_hrtimer_call tcpc_timer_call[PD_TIMER_NR] = {
+#ifdef CONFIG_USB_POWER_DELIVERY
+ [PD_TIMER_BIST_CONT_MODE] = tcpc_timer_bist_cont_mode,
+ [PD_TIMER_DISCOVER_ID] = tcpc_timer_discover_id,
+ [PD_TIMER_HARD_RESET_COMPLETE] = tcpc_timer_hard_reset_complete,
+ [PD_TIMER_NO_RESPONSE] = tcpc_timer_no_response,
+ [PD_TIMER_PS_HARD_RESET] = tcpc_timer_ps_hard_reset,
+ [PD_TIMER_PS_SOURCE_OFF] = tcpc_timer_ps_source_off,
+ [PD_TIMER_PS_SOURCE_ON] = tcpc_timer_ps_source_on,
+ [PD_TIMER_PS_TRANSITION] = tcpc_timer_ps_transition,
+ [PD_TIMER_SENDER_RESPONSE] = tcpc_timer_sender_response,
+ [PD_TIMER_SINK_ACTIVITY] = tcpc_timer_sink_activity,
+ [PD_TIMER_SINK_REQUEST] = tcpc_timer_sink_request,
+ [PD_TIMER_SINK_WAIT_CAP] = tcpc_timer_sink_wait_cap,
+ [PD_TIMER_SOURCE_ACTIVITY] = tcpc_timer_source_activity,
+ [PD_TIMER_SOURCE_CAPABILITY] = tcpc_timer_source_capability,
+ [PD_TIMER_SOURCE_START] = tcpc_timer_source_start,
+ [PD_TIMER_VCONN_ON] = tcpc_timer_vconn_on,
+ [PD_TIMER_VDM_MODE_ENTRY] = tcpc_timer_vdm_mode_entry,
+ [PD_TIMER_VDM_MODE_EXIT] = tcpc_timer_vdm_mode_exit,
+ [PD_TIMER_VDM_RESPONSE] = tcpc_timer_vdm_response,
+ [PD_TIMER_SOURCE_TRANSITION] = tcpc_timer_source_transition,
+ [PD_TIMER_SRC_RECOVER] = tcpc_timer_src_recover,
+ [PD_TIMER_VSAFE0V_DELAY] = tcpc_timer_vsafe0v_delay,
+ [PD_TIMER_VSAFE0V_TOUT] = tcpc_timer_vsafe0v_tout,
+ [PD_TIMER_DISCARD] = tcpc_timer_pd_discard,
+ [PD_TIMER_VBUS_STABLE] = tcpc_timer_vbus_stable,
+ [PD_TIMER_VBUS_PRESENT] = tcpc_timer_vbus_present,
+ [PD_PE_VDM_POSTPONE] = pd_pe_vdm_postpone_timeout,
+
+ [TYPEC_RT_TIMER_PE_IDLE] = tcpc_timer_rt_pe_idle,
+ [TYPEC_RT_TIMER_SAFE0V_DELAY] = tcpc_timer_rt_vsafe0v_delay,
+ [TYPEC_RT_TIMER_SAFE0V_TOUT] = tcpc_timer_rt_vsafe0v_tout,
+
+ [TYPEC_TRY_TIMER_DRP_TRY] = tcpc_timer_try_drp_try,
+ [TYPEC_TRY_TIMER_DRP_TRYWAIT] = tcpc_timer_try_drp_trywait,
+
+ [TYPEC_TIMER_CCDEBOUNCE] = tcpc_timer_ccdebounce,
+ [TYPEC_TIMER_PDDEBOUNCE] = tcpc_timer_pddebounce,
+ [TYPEC_TIMER_ERROR_RECOVERY] = tcpc_timer_error_recovery,
+ [TYPEC_TIMER_WAKEUP] = tcpc_timer_wakeup,
+ [TYPEC_TIMER_DRP_SRC_TOGGLE] = tcpc_timer_drp_src_toggle,
+#else
+ [TYPEC_RT_TIMER_SAFE0V_DELAY] = tcpc_timer_rt_vsafe0v_delay,
+ [TYPEC_RT_TIMER_SAFE0V_TOUT] = tcpc_timer_rt_vsafe0v_tout,
+
+ [TYPEC_TRY_TIMER_DRP_TRY] = tcpc_timer_try_drp_try,
+ [TYPEC_TRY_TIMER_DRP_TRYWAIT] = tcpc_timer_try_drp_trywait,
+
+ [TYPEC_TIMER_CCDEBOUNCE] = tcpc_timer_ccdebounce,
+ [TYPEC_TIMER_PDDEBOUNCE] = tcpc_timer_pddebounce,
+ [TYPEC_TIMER_WAKEUP] = tcpc_timer_wakeup,
+ [TYPEC_TIMER_DRP_SRC_TOGGLE] = tcpc_timer_drp_src_toggle,
+#endif /* CONFIG_USB_POWER_DELIVERY */
+};
+
+/*
+ * [BLOCK] Control Timer
+ */
+
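+/* Cancel every enabled timer in [start, end) and clear its enable bit. */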
+static inline void tcpc_reset_timer_range(
+ struct tcpc_device *tcpc, int start, int end)
+{
+ int i;
+ u64 mask;
+
+ down(&tcpc->timer_enable_mask_lock);
+ mask = rt_get_value((u64 *)&tcpc->timer_enable_mask);
+ up(&tcpc->timer_enable_mask_lock);
+
+ for (i = start; i < end; i++) {
+ if (mask & (((u64)1) << i)) {
+ hrtimer_try_to_cancel(&tcpc->tcpc_timer[i]);
+ down(&tcpc->timer_enable_mask_lock);
+ rt_clear_bit(i, (u64 *)&tcpc->timer_enable_mask);
+ up(&tcpc->timer_enable_mask_lock);
+ }
+ }
+}
+
+void tcpc_restart_timer(struct tcpc_device *tcpc, u32 timer_id)
+{
+ u64 mask;
+
+ down(&tcpc->timer_enable_mask_lock);
+ mask = rt_get_value((u64 *)&tcpc->timer_enable_mask);
+ up(&tcpc->timer_enable_mask_lock);
+ if (mask & (((u64)1) << timer_id))
+ tcpc_disable_timer(tcpc, timer_id);
+ tcpc_enable_timer(tcpc, timer_id);
+}
+
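+/*
+ * Arm the hrtimer for timer_id. Enabling any Type-C timer first cancels
+ * the other Type-C timers, so only one of them runs at a time. The
+ * timeout is split into seconds and a sub-second remainder for
+ * ktime_set() (the table appears to hold microsecond values, given the
+ * conversion to nanoseconds below).
+ */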
+void tcpc_enable_timer(struct tcpc_device *tcpc, u32 timer_id)
+{
+ u32 r, mod;
+
+ TCPC_TIMER_EN_DBG(tcpc, timer_id);
+ if (timer_id >= PD_TIMER_NR) {
+ pr_err("%s: timer_id %u is over PD_TIMER_NR\n",
+ __func__, timer_id);
+ return;
+ }
+ mutex_lock(&tcpc->timer_lock);
+ if (timer_id >= TYPEC_TIMER_START_ID)
+ tcpc_reset_timer_range(tcpc, TYPEC_TIMER_START_ID, PD_TIMER_NR);
+
+ down(&tcpc->timer_enable_mask_lock);
+ rt_set_bit(timer_id, (u64 *)&tcpc->timer_enable_mask);
+ up(&tcpc->timer_enable_mask_lock);
+ r = tcpc_timer_timeout[timer_id] / 1000000;
+ mod = tcpc_timer_timeout[timer_id] % 1000000;
+
+ mutex_unlock(&tcpc->timer_lock);
+ hrtimer_start(&tcpc->tcpc_timer[timer_id],
+ ktime_set(r, mod * 1000), HRTIMER_MODE_REL);
+}
+
+void tcpc_disable_timer(struct tcpc_device *tcpc_dev, u32 timer_id)
+{
+ u64 mask;
+
+ if (timer_id >= PD_TIMER_NR) {
+ pr_err("%s: timer_id %u is over PD_TIMER_NR\n",
+ __func__, timer_id);
+ return;
+ }
+
+ down(&tcpc_dev->timer_enable_mask_lock);
+ mask = rt_get_value((u64 *)&tcpc_dev->timer_enable_mask);
+ up(&tcpc_dev->timer_enable_mask_lock);
+
+ if (mask & (((u64)1) << timer_id)) {
+ hrtimer_try_to_cancel(&tcpc_dev->tcpc_timer[timer_id]);
+ rt_clear_bit(timer_id,
+ (u64 *)&tcpc_dev->timer_enable_mask);
+ }
+}
+
+void tcpc_timer_reset(struct tcpc_device *tcpc_dev)
+{
+ u64 mask;
+ int i;
+
+ down(&tcpc_dev->timer_enable_mask_lock);
+ mask = rt_get_value((u64 *)&tcpc_dev->timer_enable_mask);
+ up(&tcpc_dev->timer_enable_mask_lock);
+ for (i = 0; i < PD_TIMER_NR; i++)
+ if (mask & (((u64)1) << i))
+ hrtimer_try_to_cancel(&tcpc_dev->tcpc_timer[i]);
+ rt_set_value((u64 *)&tcpc_dev->timer_enable_mask, 0);
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+void tcpc_reset_pe_timer(struct tcpc_device *tcpc_dev)
+{
+ mutex_lock(&tcpc_dev->timer_lock);
+ tcpc_reset_timer_range(tcpc_dev, 0, PD_PE_TIMER_END_ID);
+ mutex_unlock(&tcpc_dev->timer_lock);
+}
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+void tcpc_reset_typec_debounce_timer(struct tcpc_device *tcpc)
+{
+ mutex_lock(&tcpc->timer_lock);
+ tcpc_reset_timer_range(tcpc, TYPEC_TIMER_START_ID, PD_TIMER_NR);
+ mutex_unlock(&tcpc->timer_lock);
+}
+
+void tcpc_reset_typec_try_timer(struct tcpc_device *tcpc)
+{
+ mutex_lock(&tcpc->timer_lock);
+ tcpc_reset_timer_range(tcpc,
+ TYPEC_TRY_TIMER_START_ID, TYPEC_TIMER_START_ID);
+ mutex_unlock(&tcpc->timer_lock);
+}
+
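+/*
+ * Drain the timer_tick bitmap: PD policy-engine timers are forwarded to
+ * on_pe_timer_timeout(), the remaining Type-C timers to
+ * tcpc_typec_handle_timeout() under typec_lock, clearing each bit once
+ * it has been handled.
+ */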
+static void tcpc_handle_timer_triggered(struct tcpc_device *tcpc_dev)
+{
+ u64 triggered_timer;
+ int i = 0;
+
+ down(&tcpc_dev->timer_tick_lock);
+ triggered_timer = rt_get_value((u64 *)&tcpc_dev->timer_tick);
+ up(&tcpc_dev->timer_tick_lock);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ for (i = 0; i < PD_PE_TIMER_END_ID; i++) {
+ if (triggered_timer & RT_MASK64(i)) {
+ TCPC_TIMER_DBG(tcpc_dev, i);
+ on_pe_timer_timeout(tcpc_dev, i);
+ down(&tcpc_dev->timer_tick_lock);
+ rt_clear_bit(i, (u64 *)&tcpc_dev->timer_tick);
+ up(&tcpc_dev->timer_tick_lock);
+ }
+ }
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ mutex_lock(&tcpc_dev->typec_lock);
+ for (; i < PD_TIMER_NR; i++) {
+ if (triggered_timer & RT_MASK64(i)) {
+ TCPC_TIMER_DBG(tcpc_dev, i);
+ tcpc_typec_handle_timeout(tcpc_dev, i);
+ down(&tcpc_dev->timer_tick_lock);
+ rt_clear_bit(i, (u64 *)&tcpc_dev->timer_tick);
+ up(&tcpc_dev->timer_tick_lock);
+ }
+ }
+ mutex_unlock(&tcpc_dev->typec_lock);
+}
+
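+/*
+ * Dedicated SCHED_FIFO kthread: sleeps on timer_wait_que and dispatches
+ * expired timers via tcpc_handle_timer_triggered() until the device is
+ * torn down.
+ */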
+static int tcpc_timer_thread(void *param)
+{
+ struct tcpc_device *tcpc_dev = param;
+
+ u64 *timer_tick;
+ struct sched_param sch_param = {.sched_priority = MAX_RT_PRIO - 1};
+
+ down(&tcpc_dev->timer_tick_lock);
+ timer_tick = &tcpc_dev->timer_tick;
+ up(&tcpc_dev->timer_tick_lock);
+
+ sched_setscheduler(current, SCHED_FIFO, &sch_param);
+ while (true) {
+ wait_event_interruptible(tcpc_dev->timer_wait_que,
+ (*timer_tick) ||
+ tcpc_dev->timer_thead_stop);
+ if (kthread_should_stop() || tcpc_dev->timer_thead_stop)
+ break;
+ do {
+ tcpc_handle_timer_triggered(tcpc_dev);
+ } while (*timer_tick);
+ }
+ return 0;
+}
+
+int tcpci_timer_init(struct tcpc_device *tcpc_dev)
+{
+ int i;
+
+ pr_info("PD Timer number = %d\n", PD_TIMER_NR);
+ tcpc_dev->timer_task = kthread_create(tcpc_timer_thread, tcpc_dev,
+ "tcpc_timer_%s.%p", dev_name(&tcpc_dev->dev), tcpc_dev);
+ init_waitqueue_head(&tcpc_dev->timer_wait_que);
+ down(&tcpc_dev->timer_tick_lock);
+ tcpc_dev->timer_tick = 0;
+ up(&tcpc_dev->timer_tick_lock);
+ rt_set_value((u64 *)&tcpc_dev->timer_enable_mask, 0);
+ wake_up_process(tcpc_dev->timer_task);
+ for (i = 0; i < PD_TIMER_NR; i++) {
+ hrtimer_init(&tcpc_dev->tcpc_timer[i],
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tcpc_dev->tcpc_timer[i].function = tcpc_timer_call[i];
+ }
+
+ pr_info("%s : init OK\n", __func__);
+ return 0;
+}
+
+int tcpci_timer_deinit(struct tcpc_device *tcpc_dev)
+{
+ u64 mask;
+ int i;
+
+ down(&tcpc_dev->timer_enable_mask_lock);
+ mask = rt_get_value((u64 *)&tcpc_dev->timer_enable_mask);
+ up(&tcpc_dev->timer_enable_mask_lock);
+
+ mutex_lock(&tcpc_dev->timer_lock);
+ wake_up_interruptible(&tcpc_dev->timer_wait_que);
+ kthread_stop(tcpc_dev->timer_task);
+ for (i = 0; i < PD_TIMER_NR; i++) {
+ if (mask & (((u64)1) << i))
+ hrtimer_try_to_cancel(&tcpc_dev->tcpc_timer[i]);
+ }
+
+ pr_info("%s : de init OK\n", __func__);
+ mutex_unlock(&tcpc_dev->timer_lock);
+ return 0;
+}
diff --git a/drivers/usb/pd/richtek/tcpci_typec.c b/drivers/usb/pd/richtek/tcpci_typec.c
new file mode 100644
index 000000000000..65b08a399b77
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpci_typec.c
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * TCPC Type-C Driver for Richtek
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/cpu.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_timer.h>
+#include <linux/hisi/usb/hub/hisi_hub.h>
+#include <linux/hisi/log/hisi_log.h>
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+#define CONFIG_TYPEC_CAP_TRY_STATE
+#endif
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+#undef CONFIG_TYPEC_CAP_TRY_STATE
+#define CONFIG_TYPEC_CAP_TRY_STATE
+#endif
+
+enum TYPEC_WAIT_PS_STATE {
+ TYPEC_WAIT_PS_DISABLE = 0,
+ TYPEC_WAIT_PS_SNK_VSAFE5V,
+ TYPEC_WAIT_PS_SRC_VSAFE0V,
+ TYPEC_WAIT_PS_SRC_VSAFE5V,
+};
+
+#if TYPEC_DBG_ENABLE
+static const char *const typec_wait_ps_name[] = {
+ "Disable",
+ "SNK_VSafe5V",
+ "SRC_VSafe0V",
+ "SRC_VSafe5V",
+};
+#endif /* TYPEC_DBG_ENABLE */
+
+enum TYPEC_HOST_OR_DEVICE {
+ TYPEC_INIT = 0,
+ TYPEC_HOST,
+ TYPEC_DEVICE
+};
+
+static int oldstatus = TYPEC_INIT;
+
+static inline void typec_wait_ps_change(struct tcpc_device *tcpc_dev,
+ enum TYPEC_WAIT_PS_STATE state)
+{
+#if TYPEC_DBG_ENABLE
+ u8 old_state;
+ u8 new_state = (u8)state;
+
+ old_state = tcpc_dev->typec_wait_ps_change;
+ if (new_state != old_state)
+ TYPEC_DBG("wait_ps=%s\r\n", typec_wait_ps_name[new_state]);
+#endif
+ hisilog_err("%s: typec_wait_ps_change!!!+++++++++++\n", __func__);
+
+#ifdef CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT
+ if (state == TYPEC_WAIT_PS_SRC_VSAFE0V)
+ tcpc_enable_timer(tcpc_dev, TYPEC_RT_TIMER_SAFE0V_TOUT);
+#endif /* CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT */
+
+ if (tcpc_dev->typec_wait_ps_change == TYPEC_WAIT_PS_SRC_VSAFE0V &&
+ state != TYPEC_WAIT_PS_SRC_VSAFE0V) {
+ tcpc_disable_timer(tcpc_dev, TYPEC_RT_TIMER_SAFE0V_DELAY);
+
+#ifdef CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT
+ tcpc_disable_timer(tcpc_dev, TYPEC_RT_TIMER_SAFE0V_TOUT);
+#endif /* CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT */
+ }
+
+ tcpc_dev->typec_wait_ps_change = (u8)state;
+ hisilog_err("%s: typec_wait_ps_change!!!-----------\n", __func__);
+}
+
+/* #define TYPEC_EXIT_ATTACHED_SRC_NO_DEBOUNCE */
+#define TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS
+
+static inline int typec_enable_low_power_mode(
+ struct tcpc_device *tcpc_dev, int pull);
+
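+/*
+ * CC helper macros; they rely on a local "tcpc_dev" pointer being in
+ * scope at the call site.
+ */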
+#define typec_get_cc1() \
+ tcpc_dev->typec_remote_cc[0]
+#define typec_get_cc2() \
+ tcpc_dev->typec_remote_cc[1]
+#define typec_get_cc_res() \
+ (tcpc_dev->typec_polarity ? typec_get_cc2() : typec_get_cc1())
+
+#define typec_check_cc1(cc) \
+ (typec_get_cc1() == (cc))
+
+#define typec_check_cc2(cc) \
+ (typec_get_cc2() == (cc))
+
+#define typec_check_cc(cc1, cc2) \
+ (typec_check_cc1(cc1) && typec_check_cc2(cc2))
+
+#define typec_check_cc1_unequal(cc) \
+ (typec_get_cc1() != (cc))
+
+#define typec_check_cc2_unequal(cc) \
+ (typec_get_cc2() != (cc))
+
+#define typec_check_cc_unequal(cc1, cc2) \
+ (typec_check_cc1_unequal(cc1) && typec_check_cc2_unequal(cc2))
+
+#define typec_is_drp_toggling() \
+ (typec_get_cc1() == TYPEC_CC_DRP_TOGGLING)
+
+#define typec_is_cc_open() \
+ typec_check_cc(TYPEC_CC_VOLT_OPEN, TYPEC_CC_VOLT_OPEN)
+
+/* TYPEC_GET_CC_STATUS */
+
+/*
+ * [BLOCK] TYPEC Connection State Definition
+ */
+
+enum TYPEC_CONNECTION_STATE {
+ typec_disabled = 0,
+ typec_errorrecovery,
+
+ typec_unattached_snk,
+ typec_unattached_src,
+
+ typec_attachwait_snk,
+ typec_attachwait_src,
+
+ typec_attached_snk,
+ typec_attached_src,
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ /* Require : Assert Rp
+ * Exit(-> Attached.SRC) : Detect Rd (tPDDebounce).
+ * Exit(-> TryWait.SNK) : Not detect Rd after tDRPTry
+ */
+ typec_try_src,
+
+ /* Require : Assert Rd
+ * Exit(-> Attached.SNK) : Detect Rp (tCCDebounce) and Vbus present.
+ * Exit(-> Unattached.SNK) : Not detect Rp (tPDDebounce)
+ */
+
+ typec_trywait_snk,
+ typec_trywait_snk_pe,
+#endif
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+
+ /* Require : Assert Rd
+ * Wait for tDRPTry and only then begin monitoring CC.
+ * Exit (-> Attached.SNK) : Detect Rp (tPDDebounce) and Vbus present.
+ * Exit (-> TryWait.SRC) : Not detect Rp for tPDDebounce.
+ */
+ typec_try_snk,
+
+ /*
+ * Require : Assert Rp
+ * Exit (-> Attached.SRC) : Detect Rd (tCCDebounce)
+ * Exit (-> Unattached.SNK) : Not detect Rd after tDRPTry
+ */
+
+ typec_trywait_src,
+ typec_trywait_src_pe,
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+ typec_audioaccessory,
+ typec_debugaccessory,
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+ typec_attached_dbgacc_snk,
+#endif /* CONFIG_TYPEC_CAP_DBGACC_SNK */
+
+#ifdef CONFIG_TYPEC_CAP_CUSTOM_SRC
+ typec_attached_custom_src,
+#endif /* CONFIG_TYPEC_CAP_CUSTOM_SRC */
+
+ typec_unattachwait_pe, /* Wait Policy Engine go to Idle */
+};
+
+static const char *const typec_state_name[] = {
+ "Disabled",
+ "ErrorRecovery",
+
+ "Unattached.SNK",
+ "Unattached.SRC",
+
+ "AttachWait.SNK",
+ "AttachWait.SRC",
+
+ "Attached.SNK",
+ "Attached.SRC",
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ "Try.SRC",
+ "TryWait.SNK",
+ "TryWait.SNK.PE",
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ "Try.SNK",
+ "TryWait.SRC",
+ "TryWait.SRC.PE",
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+ "AudioAccessory",
+ "DebugAccessory",
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+ "DBGACC.SNK",
+#endif /* CONFIG_TYPEC_CAP_DBGACC_SNK */
+
+#ifdef CONFIG_TYPEC_CAP_CUSTOM_SRC
+ "Custom.SRC",
+#endif /* CONFIG_TYPEC_CAP_CUSTOM_SRC */
+
+ "UnattachWait.PE",
+};
+
+static inline void typec_transfer_state(struct tcpc_device *tcpc_dev,
+ enum TYPEC_CONNECTION_STATE state)
+{
+ TYPEC_INFO("** %s\r\n", typec_state_name[state]);
+ tcpc_dev->typec_state = (u8)state;
+}
+
+#define TYPEC_NEW_STATE(state) \
+ (typec_transfer_state(tcpc_dev, state))
+
+/*
+ * [BLOCK] TypeC Alert Attach Status Changed
+ */
+
+static const char *const typec_attach_name[] = {
+ "NULL",
+ "SINK",
+ "SOURCE",
+ "AUDIO",
+ "DEBUG",
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+ "DBGACC_SNK",
+#endif /* CONFIG_TYPEC_CAP_DBGACC_SNK */
+
+#ifdef CONFIG_TYPEC_CAP_CUSTOM_SRC
+ "CUSTOM_SRC",
+#endif /* CONFIG_TYPEC_CAP_CUSTOM_SRC */
+};
+
+static int typec_alert_attach_state_change(struct tcpc_device *tcpc_dev)
+{
+ int ret = 0;
+
+ if (tcpc_dev->typec_attach_old == tcpc_dev->typec_attach_new) {
+ TYPEC_DBG("Attached-> %s(repeat)\r\n",
+ typec_attach_name[tcpc_dev->typec_attach_new]);
+ return 0;
+ }
+
+ TYPEC_INFO("Attached-> %s\r\n",
+ typec_attach_name[tcpc_dev->typec_attach_new]);
+
+ /* Report function */
+ ret = tcpci_report_usb_port_changed(tcpc_dev);
+
+ tcpc_dev->typec_attach_old = tcpc_dev->typec_attach_new;
+ return ret;
+}
+
+/*
+ * [BLOCK] Unattached Entry
+ */
+
+static inline int typec_enable_low_power_mode(
+ struct tcpc_device *tcpc_dev, int pull)
+{
+ int ret = 0;
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ if (tcpc_dev->typec_legacy_cable) {
+ TYPEC_DBG("LPM_LCOnly\r\n");
+ return 0;
+ }
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ if (tcpc_dev->typec_cable_only) {
+ TYPEC_DBG("LPM_RaOnly\r\n");
+
+#ifdef CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG
+ if (tcpc_dev->tcpc_flags & TCPC_FLAGS_LPM_WAKEUP_WATCHDOG)
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_WAKEUP);
+#endif /* CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG */
+
+ return 0;
+ }
+
+ if (!tcpc_dev->typec_lpm)
+ ret = tcpci_set_low_power_mode(tcpc_dev, true, pull);
+
+ tcpc_dev->typec_lpm = true;
+ return ret;
+}
+
+static inline int typec_disable_low_power_mode(
+ struct tcpc_device *tcpc_dev)
+{
+ int ret = 0;
+
+ if (tcpc_dev->typec_lpm)
+ ret = tcpci_set_low_power_mode(tcpc_dev, false, TYPEC_CC_DRP);
+
+ tcpc_dev->typec_lpm = false;
+ return ret;
+}
+
+static void typec_unattached_power_entry(struct tcpc_device *tcpc_dev)
+{
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+ hisilog_err("%s:!!!+++++++++++\n",
+ __func__);
+
+ if (tcpc_dev->typec_power_ctrl) {
+ tcpc_dev->typec_power_ctrl = false;
+ tcpci_set_vconn(tcpc_dev, false);
+ tcpci_disable_vbus_control(tcpc_dev);
+ }
+ hisilog_err("%s:!!!-----------\n",
+ __func__);
+}
+
+static void typec_unattached_entry(struct tcpc_device *tcpc_dev)
+{
+ typec_unattached_power_entry(tcpc_dev);
+
+ switch (tcpc_dev->typec_role) {
+ case TYPEC_ROLE_SNK:
+ TYPEC_NEW_STATE(typec_unattached_snk);
+ TYPEC_DBG("TYPEC_ROLE_SNK\r\n");
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+ typec_enable_low_power_mode(tcpc_dev, TYPEC_CC_RD);
+ break;
+ case TYPEC_ROLE_SRC:
+ TYPEC_NEW_STATE(typec_unattached_src);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP);
+ typec_enable_low_power_mode(tcpc_dev, TYPEC_CC_RP);
+ break;
+ default:
+ switch (tcpc_dev->typec_state) {
+ case typec_attachwait_snk:
+ case typec_audioaccessory:
+ TYPEC_NEW_STATE(typec_unattached_src);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP);
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_DRP_SRC_TOGGLE);
+ break;
+ default:
+ gpio_hub_switch_to_hub();
+ gpio_hub_typec_power_off();
+ if (oldstatus == TYPEC_DEVICE) {
+ TYPEC_DBG("device off, otg host:%d:%d\r\n",
+ oldstatus, tcpc_dev->typec_state);
+ gpio_hub_power_on();
+
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ hisi_usb_otg_event(ID_FALL_EVENT);
+ oldstatus = TYPEC_HOST;
+ } else if (oldstatus == TYPEC_INIT) {
+ TYPEC_DBG("init otg host no insert.\r\n");
+ gpio_hub_power_on();
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ hisi_usb_otg_event(ID_FALL_EVENT);
+ oldstatus = TYPEC_HOST;
+ } else {
+ TYPEC_DBG("host off, otg host:%d:%d\r\n",
+ oldstatus, tcpc_dev->typec_state);
+ }
+ TYPEC_NEW_STATE(typec_unattached_snk);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_DRP);
+ typec_enable_low_power_mode(tcpc_dev, TYPEC_CC_DRP);
+ break;
+ }
+ break;
+ }
+}
+
+static void typec_unattach_wait_pe_idle_entry(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->typec_attach_new = TYPEC_UNATTACHED;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->typec_attach_old) {
+ TYPEC_NEW_STATE(typec_unattachwait_pe);
+ return;
+ }
+#endif
+
+ typec_unattached_entry(tcpc_dev);
+}
+
+/*
+ * [BLOCK] Attached Entry
+ */
+
+static inline int typec_set_polarity(struct tcpc_device *tcpc_dev,
+ bool polarity)
+{
+ tcpc_dev->typec_polarity = polarity;
+ return tcpci_set_polarity(tcpc_dev, polarity);
+}
+
+static inline int typec_set_plug_orient(struct tcpc_device *tcpc_dev,
+ u8 res, bool polarity)
+{
+ int rv = typec_set_polarity(tcpc_dev, polarity);
+
+ if (rv)
+ return rv;
+
+ return tcpci_set_cc(tcpc_dev, res);
+}
+
+static void typec_source_attached_with_vbus_entry(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_SRC;
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+}
+
+static inline void typec_source_attached_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_attached_src);
+
+ TYPEC_DBG("typec otg host attach %s\r\n", __func__);
+ oldstatus = TYPEC_HOST;
+ gpio_hub_switch_to_typec();
+ gpio_hub_typec_power_on();
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_SRC_VSAFE5V);
+
+ tcpc_disable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+
+ typec_set_plug_orient(tcpc_dev,
+ tcpc_dev->typec_local_rp_level,
+ typec_check_cc2(TYPEC_CC_VOLT_RD));
+
+ tcpc_dev->typec_power_ctrl = true;
+ tcpci_set_vconn(tcpc_dev, true);
+ tcpci_source_vbus(tcpc_dev,
+ TCP_VBUS_CTRL_TYPEC, TCPC_VBUS_SOURCE_5V, -1);
+}
+
+static inline void typec_sink_attached_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_attached_snk);
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ tcpc_dev->typec_legacy_cable = false;
+ tcpc_dev->typec_legacy_cable_suspect = 0;
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_SNK;
+
+#ifdef CONFIG_TYPEC_CAP_TRY_STATE
+ if (tcpc_dev->typec_role >= TYPEC_ROLE_DRP)
+ tcpc_reset_typec_try_timer(tcpc_dev);
+#endif /* CONFIG_TYPEC_CAP_TRY_STATE */
+
+ typec_set_plug_orient(tcpc_dev, TYPEC_CC_RD,
+ typec_check_cc2_unequal(TYPEC_CC_VOLT_OPEN));
+ tcpc_dev->typec_remote_rp_level = typec_get_cc_res();
+
+ tcpc_dev->typec_power_ctrl = true;
+ tcpci_sink_vbus(tcpc_dev, TCP_VBUS_CTRL_TYPEC, TCPC_VBUS_SINK_5V, -1);
+}
+
+static inline void typec_custom_src_attached_entry(
+ struct tcpc_device *tcpc_dev)
+{
+#ifdef CONFIG_TYPEC_CAP_CUSTOM_SRC
+ int cc1 = typec_get_cc1();
+ int cc2 = typec_get_cc2();
+
+ if (cc1 == TYPEC_CC_VOLT_SNK_DFT && cc2 == TYPEC_CC_VOLT_SNK_DFT) {
+ TYPEC_NEW_STATE(typec_attached_custom_src);
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_CUSTOM_SRC;
+ TYPEC_DBG("typec host mode, device attached\r\n");
+ oldstatus = TYPEC_DEVICE;
+ gpio_hub_power_off();
+ gpio_hub_typec_power_off();
+
+ hisi_usb_otg_event(ID_RISE_EVENT);
+ hisi_usb_otg_event(CHARGER_CONNECT_EVENT);
+ return;
+ }
+#endif /* CONFIG_TYPEC_CAP_CUSTOM_SRC */
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+ TYPEC_DBG("[Warning] Same Rp (%d)\r\n", cc1);
+#else
+ TYPEC_DBG("[Warning] CC Both Rp\r\n");
+#endif
+}
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+
+static inline u8 typec_get_sink_dbg_acc_rp_level(
+ int cc1, int cc2)
+{
+ if (cc2 == TYPEC_CC_VOLT_SNK_DFT)
+ return cc1;
+ else
+ return TYPEC_CC_VOLT_SNK_DFT;
+}
+
+static inline void typec_sink_dbg_acc_attached_entry(
+ struct tcpc_device *tcpc_dev)
+{
+ bool polarity;
+ u8 rp_level;
+
+ int cc1 = typec_get_cc1();
+ int cc2 = typec_get_cc2();
+
+ if (cc1 == cc2) {
+ typec_custom_src_attached_entry(tcpc_dev);
+ return;
+ }
+
+ TYPEC_NEW_STATE(typec_attached_dbgacc_snk);
+
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_DBGACC_SNK;
+
+ polarity = cc2 > cc1;
+
+ if (polarity)
+ rp_level = typec_get_sink_dbg_acc_rp_level(cc2, cc1);
+ else
+ rp_level = typec_get_sink_dbg_acc_rp_level(cc1, cc2);
+
+ typec_set_plug_orient(tcpc_dev, TYPEC_CC_RD, polarity);
+ tcpc_dev->typec_remote_rp_level = rp_level;
+
+ tcpc_dev->typec_power_ctrl = true;
+ tcpci_sink_vbus(tcpc_dev, TCP_VBUS_CTRL_TYPEC, TCPC_VBUS_SINK_5V, -1);
+}
+#else
+static inline void typec_sink_dbg_acc_attached_entry(
+ struct tcpc_device *tcpc_dev)
+{
+ typec_custom_src_attached_entry(tcpc_dev);
+}
+#endif /* CONFIG_TYPEC_CAP_DBGACC_SNK */
+
+/*
+ * [BLOCK] Try.SRC / TryWait.SNK
+ */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+
+static inline void typec_try_src_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_try_src);
+ tcpc_dev->typec_drp_try_timeout = false;
+
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP);
+ tcpc_enable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+}
+
+static inline void typec_trywait_snk_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_trywait_snk);
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+ tcpci_set_vconn(tcpc_dev, false);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+ tcpci_source_vbus(tcpc_dev,
+ TCP_VBUS_CTRL_TYPEC, TCPC_VBUS_SOURCE_0V, 0);
+ tcpc_disable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+}
+
+static inline void typec_trywait_snk_pe_entry(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->typec_attach_new = TYPEC_UNATTACHED;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->typec_attach_old) {
+ TYPEC_NEW_STATE(typec_trywait_snk_pe);
+ return;
+ }
+#endif
+
+ typec_trywait_snk_entry(tcpc_dev);
+}
+
+#endif /* #ifdef CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+/*
+ * [BLOCK] Try.SNK / TryWait.SRC
+ */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+
+static inline void typec_try_snk_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_try_snk);
+ tcpc_dev->typec_drp_try_timeout = false;
+
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+ tcpc_enable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+}
+
+static inline void typec_trywait_src_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_NEW_STATE(typec_trywait_src);
+ tcpc_dev->typec_drp_try_timeout = false;
+
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP);
+ tcpci_sink_vbus(tcpc_dev, TCP_VBUS_CTRL_TYPEC, TCPC_VBUS_SINK_0V, 0);
+ tcpc_enable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+}
+
+#ifndef CONFIG_USB_POWER_DELIVERY
+static inline void typec_trywait_src_pe_entry(struct tcpc_device *tcpc_dev)
+{
+ tcpc_dev->typec_attach_new = TYPEC_UNATTACHED;
+
+ if (tcpc_dev->typec_attach_old) {
+ TYPEC_NEW_STATE(typec_trywait_src_pe);
+ return;
+ }
+
+ typec_trywait_src_entry(tcpc_dev);
+}
+#endif
+
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+/*
+ * [BLOCK] Attach / Detach
+ */
+
+static inline void typec_cc_snk_detect_vsafe5v_entry(
+ struct tcpc_device *tcpc_dev)
+{
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+ if (typec_check_cc_unequal(TYPEC_CC_VOLT_OPEN, TYPEC_CC_VOLT_OPEN)) {
+ typec_sink_dbg_acc_attached_entry(tcpc_dev);
+ return;
+ }
+
+ TYPEC_DBG("typec device mode, attached\r\n");
+ oldstatus = TYPEC_DEVICE;
+ gpio_hub_power_off();
+ gpio_hub_typec_power_off();
+ hisi_usb_otg_event(ID_RISE_EVENT);
+ gpio_hub_switch_to_typec();
+ hisi_usb_otg_event(CHARGER_CONNECT_EVENT);
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ if (tcpc_dev->typec_role == TYPEC_ROLE_TRY_SRC) {
+ if (tcpc_dev->typec_state == typec_attachwait_snk) {
+ typec_try_src_entry(tcpc_dev);
+ return;
+ }
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+ typec_sink_attached_entry(tcpc_dev);
+}
+
+static inline void typec_cc_snk_detect_entry(struct tcpc_device *tcpc_dev)
+{
+ /* If the port partner acts as a Source without VBUS, wait for vSafe5V */
+ if (tcpci_check_vbus_valid(tcpc_dev))
+ typec_cc_snk_detect_vsafe5v_entry(tcpc_dev);
+ else
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_SNK_VSAFE5V);
+}
+
+static inline void typec_cc_src_detect_vsafe0v_entry(
+ struct tcpc_device *tcpc_dev)
+{
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ if (tcpc_dev->typec_role == TYPEC_ROLE_TRY_SNK) {
+ if (tcpc_dev->typec_state == typec_attachwait_src) {
+ typec_try_snk_entry(tcpc_dev);
+ return;
+ }
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+ typec_source_attached_entry(tcpc_dev);
+}
+
+static inline void typec_cc_src_detect_entry(
+ struct tcpc_device *tcpc_dev)
+{
+ /* If the port partner acts as a Sink with low VBUS, wait for vSafe0V */
+ bool vbus_absent = tcpci_check_vsafe0v(tcpc_dev, true);
+
+ if (vbus_absent)
+ typec_cc_src_detect_vsafe0v_entry(tcpc_dev);
+ else
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_SRC_VSAFE0V);
+}
+
+static inline void typec_cc_src_remove_entry(struct tcpc_device *tcpc_dev)
+{
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ if (tcpc_dev->typec_role == TYPEC_ROLE_TRY_SRC) {
+ switch (tcpc_dev->typec_state) {
+ case typec_attached_src:
+ typec_trywait_snk_pe_entry(tcpc_dev);
+ return;
+ case typec_try_src:
+ typec_trywait_snk_entry(tcpc_dev);
+ return;
+ }
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+ typec_unattach_wait_pe_idle_entry(tcpc_dev);
+}
+
+static inline void typec_cc_snk_remove_entry(struct tcpc_device *tcpc_dev)
+{
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ if (tcpc_dev->typec_state == typec_try_snk) {
+ typec_trywait_src_entry(tcpc_dev);
+ return;
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+ typec_unattach_wait_pe_idle_entry(tcpc_dev);
+}
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+
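+/*
+ * Legacy (non-compliant) cable detection: once a single-Rd connection has
+ * been suspected often enough, re-check with Rp1.5 applied; a CC that
+ * still reads non-open confirms the legacy cable.
+ */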
+static inline bool typec_check_legacy_cable(
+ struct tcpc_device *tcpc_dev)
+{
+ bool check_legacy = false;
+
+ if (typec_check_cc(TYPEC_CC_VOLT_RD, TYPEC_CC_VOLT_OPEN) ||
+ typec_check_cc(TYPEC_CC_VOLT_OPEN, TYPEC_CC_VOLT_RD))
+ check_legacy = true;
+
+ if (check_legacy &&
+ tcpc_dev->typec_legacy_cable_suspect >=
+ TCPC_LEGACY_CABLE_CONFIRM) {
+ TYPEC_INFO("LC->Suspect\r\n");
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP_1_5);
+ mdelay(1);
+
+ if (tcpci_get_cc(tcpc_dev) != 0) {
+ TYPEC_INFO("LC->Confirm\r\n");
+ tcpc_dev->typec_legacy_cable = true;
+ tcpc_dev->typec_legacy_cable_suspect =
+ TCPC_LEGACY_CABLE_CONFIRM;
+ return true;
+ }
+
+ tcpc_dev->typec_legacy_cable = false;
+ tcpc_dev->typec_legacy_cable_suspect = 0;
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP);
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+static inline bool typec_cc_change_source_entry(struct tcpc_device *tcpc_dev)
+{
+ int cc1, cc2;
+
+ cc1 = typec_get_cc1();
+ cc2 = typec_get_cc2();
+
+ if ((cc1 == TYPEC_CC_VOLT_RD) && (cc2 == TYPEC_CC_VOLT_RD)) {
+ TYPEC_NEW_STATE(typec_debugaccessory);
+ TYPEC_DBG("[Debug] CC1&2 Both Rd\r\n");
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_DEBUG;
+ } else if ((cc1 == TYPEC_CC_VOLT_RA) && (cc2 == TYPEC_CC_VOLT_RA)) {
+ TYPEC_NEW_STATE(typec_audioaccessory);
+ TYPEC_DBG("[Audio] CC1&2 Both Ra\r\n");
+ tcpc_dev->typec_attach_new = TYPEC_ATTACHED_AUDIO;
+ } else {
+ if ((cc1 == TYPEC_CC_VOLT_RD) || (cc2 == TYPEC_CC_VOLT_RD)) {
+ typec_cc_src_detect_entry(tcpc_dev);
+ } else {
+ if ((cc1 == TYPEC_CC_VOLT_RA) ||
+ (cc2 == TYPEC_CC_VOLT_RA))
+ TYPEC_DBG("[Cable] Ra Only\r\n");
+ typec_cc_src_remove_entry(tcpc_dev);
+ }
+ }
+
+ return true;
+}
+
+static inline bool typec_cc_change_sink_entry(struct tcpc_device *tcpc_dev)
+{
+ if (typec_is_cc_open())
+ typec_cc_snk_remove_entry(tcpc_dev);
+ else
+ typec_cc_snk_detect_entry(tcpc_dev);
+
+ return true;
+}
+
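+/*
+ * Decide whether the port should behave as a sink for the current CC
+ * change: forced by a local Rp/Rd pull, or, when toggling as DRP,
+ * inferred from the remote CC readings (a detected Rp voltage level
+ * means we ended up presenting Rd).
+ */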
+static inline bool typec_is_act_as_sink_role(
+ struct tcpc_device *tcpc_dev)
+{
+ bool as_sink = true;
+ u8 cc_sum;
+
+ switch (tcpc_dev->typec_local_cc & 0x07) {
+ case TYPEC_CC_RP:
+ as_sink = false;
+ break;
+ case TYPEC_CC_RD:
+ as_sink = true;
+ break;
+ case TYPEC_CC_DRP:
+ cc_sum = typec_get_cc1() + typec_get_cc2();
+ as_sink = (cc_sum >= TYPEC_CC_VOLT_SNK_DFT);
+ break;
+ }
+
+ return as_sink;
+}
+
+static inline bool typec_handle_cc_changed_entry(struct tcpc_device *tcpc_dev)
+{
+ TYPEC_INFO("[CC_Change] %d/%d\r\n", typec_get_cc1(), typec_get_cc2());
+
+ tcpc_dev->typec_attach_new = tcpc_dev->typec_attach_old;
+
+ if (typec_is_act_as_sink_role(tcpc_dev))
+ typec_cc_change_sink_entry(tcpc_dev);
+ else
+ typec_cc_change_source_entry(tcpc_dev);
+
+ typec_alert_attach_state_change(tcpc_dev);
+ return true;
+}
+
+/*
+ * [BLOCK] Handle cc-change event
+ */
+
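+/*
+ * A CC attach was seen: ignore it while already attached, restart the
+ * proper debounce timer for the Try/TryWait states, otherwise enter
+ * AttachWait.SNK/SRC (optionally checking for a legacy cable first) and
+ * start tCCDebounce.
+ */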
+static inline void typec_attach_wait_entry(struct tcpc_device *tcpc_dev)
+{
+ bool as_sink;
+
+ if (tcpc_dev->typec_attach_old ||
+ tcpc_dev->typec_state == typec_attached_src) {
+ tcpc_reset_typec_debounce_timer(tcpc_dev);
+ TYPEC_DBG("Attached, Ignore cc_attach\r\n");
+ return;
+ }
+
+ switch (tcpc_dev->typec_state) {
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ case typec_try_src:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+ return;
+
+ case typec_trywait_snk:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_CCDEBOUNCE);
+ return;
+#endif
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ case typec_try_snk:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+ return;
+
+ case typec_trywait_src:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_CCDEBOUNCE);
+ return;
+#endif
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ case typec_unattachwait_pe:
+ TYPEC_INFO("Force PE Idle\r\n");
+ tcpc_dev->pd_wait_pe_idle = false;
+ tcpc_disable_timer(tcpc_dev, TYPEC_RT_TIMER_PE_IDLE);
+ typec_unattached_power_entry(tcpc_dev);
+ break;
+#endif
+ }
+
+ as_sink = typec_is_act_as_sink_role(tcpc_dev);
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ if (!as_sink && typec_check_legacy_cable(tcpc_dev))
+ return;
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ if (as_sink)
+ TYPEC_NEW_STATE(typec_attachwait_snk);
+ else
+ TYPEC_NEW_STATE(typec_attachwait_src);
+
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_CCDEBOUNCE);
+}
+
+#ifdef TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS
+static inline int typec_attached_snk_cc_detach(struct tcpc_device *tcpc_dev)
+{
+ int vbus_valid = tcpci_check_vbus_valid(tcpc_dev);
+ bool detach_by_cc = false;
+
+ /* For Ellisys Test, Applying Low VBUS (3.67v) as Sink */
+ if (vbus_valid) {
+ detach_by_cc = true;
+ TYPEC_DBG("Detach_CC (LowVBUS)\r\n");
+ }
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ /* For Source detach during HardReset */
+ if ((!vbus_valid) &&
+ tcpc_dev->pd_wait_hard_reset_complete) {
+ detach_by_cc = true;
+ TYPEC_DBG("Detach_CC (HardReset)\r\n");
+ }
+#endif
+
+ if (detach_by_cc)
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+
+ return 0;
+}
+#endif /* TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS */
+
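+/*
+ * A CC detach was seen: update the legacy-cable suspicion counter where
+ * applicable and start the debounce timer appropriate for the current
+ * state.
+ */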
+static inline void typec_detach_wait_entry(struct tcpc_device *tcpc_dev)
+{
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ bool suspect_legacy = false;
+
+ if (tcpc_dev->typec_state == typec_attachwait_src) {
+ suspect_legacy = true;
+ } else if (tcpc_dev->typec_state == typec_attached_src) {
+ if (tcpc_dev->typec_attach_old != TYPEC_ATTACHED_SRC) {
+ suspect_legacy = true;
+ } else {
+ tcpc_dev->typec_legacy_cable = false;
+ tcpc_dev->typec_legacy_cable_suspect = 0;
+ }
+ }
+
+ if (suspect_legacy) {
+ tcpc_dev->typec_legacy_cable_suspect++;
+ TYPEC_DBG("LC_suspect: %d\r\n",
+ tcpc_dev->typec_legacy_cable_suspect);
+ }
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ switch (tcpc_dev->typec_state) {
+#ifdef TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS
+ case typec_attached_snk:
+ typec_attached_snk_cc_detach(tcpc_dev);
+ break;
+#endif /* TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS */
+
+ case typec_audioaccessory:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_CCDEBOUNCE);
+ break;
+
+#ifdef TYPEC_EXIT_ATTACHED_SRC_NO_DEBOUNCE
+ case typec_attached_src:
+ TYPEC_INFO("Exit Attached.SRC immediately\r\n");
+ tcpc_reset_typec_debounce_timer(tcpc_dev);
+
+ /* force to terminate TX */
+ tcpci_init(tcpc_dev, true);
+
+ typec_cc_src_remove_entry(tcpc_dev);
+ typec_alert_attach_state_change(tcpc_dev);
+ break;
+#endif /* TYPEC_EXIT_ATTACHED_SRC_NO_DEBOUNCE */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ case typec_try_src:
+ if (tcpc_dev->typec_drp_try_timeout) {
+ tcpc_enable_timer(tcpc_dev,
+ TYPEC_TIMER_PDDEBOUNCE);
+ } else {
+ tcpc_reset_typec_debounce_timer(tcpc_dev);
+ TYPEC_DBG("[Try] Igrone cc_detach\r\n");
+ }
+ break;
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ case typec_trywait_src:
+ if (tcpc_dev->typec_drp_try_timeout) {
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+ } else {
+ tcpc_reset_typec_debounce_timer(tcpc_dev);
+ TYPEC_DBG("[Try] Igrone cc_detach\r\n");
+ }
+ break;
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+ default:
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+ break;
+ }
+}
+
+static inline bool typec_is_cc_attach(struct tcpc_device *tcpc_dev)
+{
+ bool cc_attach = false;
+ int cc1 = typec_get_cc1();
+ int cc2 = typec_get_cc2();
+ int cc_res = typec_get_cc_res();
+
+ tcpc_dev->typec_cable_only = false;
+
+ if (tcpc_dev->typec_attach_old) {
+ if ((cc_res != TYPEC_CC_VOLT_OPEN) &&
+ (cc_res != TYPEC_CC_VOLT_RA))
+ cc_attach = true;
+ } else {
+ if (cc1 != TYPEC_CC_VOLT_OPEN)
+ cc_attach = true;
+
+ if (cc2 != TYPEC_CC_VOLT_OPEN)
+ cc_attach = true;
+
+ /* Cable Only, no device */
+ if ((cc1 + cc2) == TYPEC_CC_VOLT_RA) {
+ cc_attach = false;
+ tcpc_dev->typec_cable_only = true;
+ }
+ }
+
+ return cc_attach;
+}
+
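+/*
+ * Entry point for a CC-status alert: ignored while the TCPC is still DRP
+ * toggling or while a PR_Swap/error recovery is pending; otherwise the
+ * change is routed to the attach- or detach-wait handling above.
+ */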
+int tcpc_typec_handle_cc_change(struct tcpc_device *tcpc_dev)
+{
+ int ret = tcpci_get_cc(tcpc_dev);
+
+ if (ret < 0)
+ return ret;
+
+ if (typec_is_drp_toggling()) {
+ TYPEC_DBG("[Waring] DRP Toggling\r\n");
+ if (tcpc_dev->typec_lpm)
+ tcpci_set_low_power_mode(tcpc_dev, true, TYPEC_CC_DRP);
+ return 0;
+ }
+
+ TYPEC_INFO("[CC_Alert] %d/%d\r\n", typec_get_cc1(), typec_get_cc2());
+
+ typec_disable_low_power_mode(tcpc_dev);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->pd_wait_pr_swap_complete) {
+ TYPEC_DBG("[PR.Swap] Ignore CC_Alert\r\n");
+ return 0;
+ }
+
+ if (tcpc_dev->pd_wait_error_recovery) {
+ TYPEC_DBG("[Recovery] Ignore CC_Alert\r\n");
+ return 0;
+ }
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ if ((tcpc_dev->typec_state == typec_try_snk) &&
+ (!tcpc_dev->typec_drp_try_timeout)) {
+ TYPEC_DBG("[Try.SNK] Ignore CC_Alert\r\n");
+ return 0;
+ }
+
+ if (tcpc_dev->typec_state == typec_trywait_src_pe) {
+ TYPEC_DBG("[Try.PE] Ignore CC_Alert\r\n");
+ return 0;
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ if (tcpc_dev->typec_state == typec_trywait_snk_pe) {
+ TYPEC_DBG("[Try.PE] Ignore CC_Alert\r\n");
+ return 0;
+ }
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+ if (tcpc_dev->typec_state == typec_attachwait_snk ||
+ tcpc_dev->typec_state == typec_attachwait_src)
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+ if (typec_is_cc_attach(tcpc_dev))
+ typec_attach_wait_entry(tcpc_dev);
+ else
+ typec_detach_wait_entry(tcpc_dev);
+
+ return 0;
+}
+
+/*
+ * [BLOCK] Handle timeout event
+ */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_STATE
+static inline int typec_handle_drp_try_timeout(struct tcpc_device *tcpc_dev)
+{
+ bool not_detect, en_timer;
+
+ tcpc_dev->typec_drp_try_timeout = true;
+ tcpc_disable_timer(tcpc_dev, TYPEC_TRY_TIMER_DRP_TRY);
+
+ if (typec_is_drp_toggling()) {
+ TYPEC_DBG("[Waring] DRP Toggling\r\n");
+ return 0;
+ }
+
+ not_detect = typec_is_cc_open();
+
+ switch (tcpc_dev->typec_state) {
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ case typec_try_src:
+ en_timer = not_detect;
+ break;
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ case typec_trywait_src:
+ en_timer = not_detect;
+ break;
+
+ case typec_try_snk:
+ en_timer = true;
+ break;
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+
+ default:
+ en_timer = false;
+ break;
+ }
+
+ if (en_timer)
+ tcpc_enable_timer(tcpc_dev, TYPEC_TIMER_PDDEBOUNCE);
+
+ return 0;
+}
+#endif /* CONFIG_TYPEC_CAP_TRY_STATE */
+
+static inline int typec_handle_debounce_timeout(struct tcpc_device *tcpc_dev)
+{
+ if (typec_is_drp_toggling()) {
+ TYPEC_DBG("[Waring] DRP Toggling\r\n");
+ return 0;
+ }
+
+ typec_handle_cc_changed_entry(tcpc_dev);
+ return 0;
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+static inline int typec_handle_error_recovery_timeout(
+ struct tcpc_device *tcpc_dev)
+{
+ /* TODO: Check it later */
+ tcpc_dev->typec_attach_new = TYPEC_UNATTACHED;
+
+ mutex_lock(&tcpc_dev->access_lock);
+ tcpc_dev->pd_wait_error_recovery = false;
+ mutex_unlock(&tcpc_dev->access_lock);
+
+ typec_unattach_wait_pe_idle_entry(tcpc_dev);
+ typec_alert_attach_state_change(tcpc_dev);
+
+ return 0;
+}
+
+static inline int typec_handle_pe_idle(struct tcpc_device *tcpc_dev)
+{
+ switch (tcpc_dev->typec_state) {
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ case typec_trywait_snk_pe:
+ typec_trywait_snk_entry(tcpc_dev);
+ break;
+#endif
+
+ case typec_unattachwait_pe:
+ typec_unattached_entry(tcpc_dev);
+ break;
+
+ default:
+ TYPEC_DBG("dummy_pe_idle\r\n");
+ break;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+static inline int typec_handle_src_reach_vsafe0v(struct tcpc_device *tcpc_dev)
+{
+ if (typec_is_drp_toggling()) {
+ TYPEC_DBG("[Waring] DRP Toggling\r\n");
+ return 0;
+ }
+
+ typec_cc_src_detect_vsafe0v_entry(tcpc_dev);
+ return 0;
+}
+
+static inline int typec_handle_src_toggle_timeout(struct tcpc_device *tcpc_dev)
+{
+ if (tcpc_dev->typec_state == typec_unattached_src) {
+ TYPEC_NEW_STATE(typec_unattached_snk);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_DRP);
+ typec_enable_low_power_mode(tcpc_dev, TYPEC_CC_DRP);
+ }
+
+ return 0;
+}
+
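+/*
+ * Timer expiry handler called from the timer thread: Type-C debounce
+ * timers are reset as a group, RT timers are disabled individually, and
+ * the event is then dispatched to the matching state handler.
+ */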
+int tcpc_typec_handle_timeout(struct tcpc_device *tcpc_dev, u32 timer_id)
+{
+ int ret = 0;
+
+#ifdef CONFIG_TYPEC_CAP_TRY_STATE
+ if (timer_id == TYPEC_TRY_TIMER_DRP_TRY)
+ return typec_handle_drp_try_timeout(tcpc_dev);
+#endif /* CONFIG_TYPEC_CAP_TRY_STATE */
+
+ if (timer_id >= TYPEC_TIMER_START_ID)
+ tcpc_reset_typec_debounce_timer(tcpc_dev);
+ else if (timer_id >= TYPEC_RT_TIMER_START_ID)
+ tcpc_disable_timer(tcpc_dev, timer_id);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->pd_wait_pr_swap_complete) {
+ TYPEC_DBG("[PR.Swap] Igrone timer_evt\r\n");
+ return 0;
+ }
+
+ if (tcpc_dev->pd_wait_error_recovery &&
+ (timer_id != TYPEC_TIMER_ERROR_RECOVERY)) {
+ TYPEC_DBG("[Recovery] Igrone timer_evt\r\n");
+ return 0;
+ }
+#endif
+
+ switch (timer_id) {
+ case TYPEC_TIMER_CCDEBOUNCE:
+ case TYPEC_TIMER_PDDEBOUNCE:
+ ret = typec_handle_debounce_timeout(tcpc_dev);
+ break;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ case TYPEC_TIMER_ERROR_RECOVERY:
+ ret = typec_handle_error_recovery_timeout(tcpc_dev);
+ break;
+
+ case TYPEC_RT_TIMER_PE_IDLE:
+ ret = typec_handle_pe_idle(tcpc_dev);
+ break;
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ case TYPEC_RT_TIMER_SAFE0V_DELAY:
+ ret = typec_handle_src_reach_vsafe0v(tcpc_dev);
+ break;
+
+ case TYPEC_TIMER_WAKEUP:
+ if (tcpc_dev->typec_lpm || tcpc_dev->typec_cable_only) {
+ tcpc_dev->typec_lpm = true;
+ ret = tcpci_set_low_power_mode(tcpc_dev, true,
+ (tcpc_dev->typec_role == TYPEC_ROLE_SRC) ?
+ TYPEC_CC_RP : TYPEC_CC_DRP);
+ }
+ break;
+
+#ifdef CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT
+ case TYPEC_RT_TIMER_SAFE0V_TOUT:
+ ret = tcpc_typec_handle_vsafe0v(tcpc_dev);
+ break;
+#endif /* CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT */
+
+ case TYPEC_TIMER_DRP_SRC_TOGGLE:
+ ret = typec_handle_src_toggle_timeout(tcpc_dev);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [BLOCK] Handle ps-change event
+ */
+
+static inline int typec_handle_vbus_present(struct tcpc_device *tcpc_dev)
+{
+ switch (tcpc_dev->typec_wait_ps_change) {
+ case TYPEC_WAIT_PS_SNK_VSAFE5V:
+ typec_cc_snk_detect_vsafe5v_entry(tcpc_dev);
+ typec_alert_attach_state_change(tcpc_dev);
+ break;
+ case TYPEC_WAIT_PS_SRC_VSAFE5V:
+ typec_source_attached_with_vbus_entry(tcpc_dev);
+ typec_alert_attach_state_change(tcpc_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static inline int typec_attached_snk_vbus_absent(struct tcpc_device *tcpc_dev)
+{
+#ifdef TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->pd_wait_hard_reset_complete ||
+ tcpc_dev->pd_hard_reset_event_pending) {
+ if (typec_get_cc_res() != TYPEC_CC_VOLT_OPEN) {
+ TYPEC_DBG("Ignore vbus_absent(snk), HReset & CC!=0\r\n");
+ return 0;
+ }
+ }
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+ typec_unattach_wait_pe_idle_entry(tcpc_dev);
+ typec_alert_attach_state_change(tcpc_dev);
+#endif /* TYPEC_EXIT_ATTACHED_SNK_VIA_VBUS */
+
+ return 0;
+}
+
+static inline int typec_handle_vbus_absent(struct tcpc_device *tcpc_dev)
+{
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (tcpc_dev->pd_wait_pr_swap_complete) {
+ TYPEC_DBG("[PR.Swap] Igrone vbus_absent\r\n");
+ return 0;
+ }
+
+ if (tcpc_dev->pd_wait_error_recovery) {
+ TYPEC_DBG("[Recovery] Igrone vbus_absent\r\n");
+ return 0;
+ }
+#endif
+
+ if (tcpc_dev->typec_state == typec_attached_snk)
+ typec_attached_snk_vbus_absent(tcpc_dev);
+
+#ifndef CONFIG_TCPC_VSAFE0V_DETECT
+ tcpc_typec_handle_vsafe0v(tcpc_dev);
+#endif /* !CONFIG_TCPC_VSAFE0V_DETECT */
+
+ return 0;
+}
+
+int tcpc_typec_handle_ps_change(struct tcpc_device *tcpc_dev, int vbus_level)
+{
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ if (tcpc_dev->typec_legacy_cable) {
+ if (vbus_level) {
+ TYPEC_INFO("LC->Attached\r\n");
+ tcpc_dev->typec_legacy_cable = false;
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+ } else {
+ TYPEC_INFO("LC->Detached\r\n");
+ tcpc_dev->typec_legacy_cable = false;
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_DRP);
+ typec_enable_low_power_mode(tcpc_dev, TYPEC_CC_DRP);
+ }
+ return 0;
+ }
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ if (typec_is_drp_toggling()) {
+ TYPEC_DBG("[Waring] DRP Toggling\r\n");
+ return 0;
+ }
+
+ if (vbus_level)
+ return typec_handle_vbus_present(tcpc_dev);
+ else
+ return typec_handle_vbus_absent(tcpc_dev);
+}
+
+/*
+ * [BLOCK] Handle PE event
+ */
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+int tcpc_typec_advertise_explicit_contract(struct tcpc_device *tcpc_dev)
+{
+ if (tcpc_dev->typec_local_rp_level == TYPEC_CC_RP_DFT)
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RP_1_5);
+
+ return 0;
+}
+
+int tcpc_typec_handle_pe_pr_swap(struct tcpc_device *tcpc_dev)
+{
+ int ret = 0;
+
+ mutex_lock(&tcpc_dev->typec_lock);
+ switch (tcpc_dev->typec_state) {
+ case typec_attached_snk:
+ TYPEC_NEW_STATE(typec_attached_src);
+ tcpc_dev->typec_attach_old = TYPEC_ATTACHED_SRC;
+ tcpci_set_cc(tcpc_dev, tcpc_dev->typec_local_rp_level);
+ break;
+ case typec_attached_src:
+ TYPEC_NEW_STATE(typec_attached_snk);
+ tcpc_dev->typec_attach_old = TYPEC_ATTACHED_SNK;
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&tcpc_dev->typec_lock);
+ return ret;
+}
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+/*
+ * [BLOCK] Handle reach vSafe0V event
+ */
+
+int tcpc_typec_handle_vsafe0v(struct tcpc_device *tcpc_dev)
+{
+ if (tcpc_dev->typec_wait_ps_change == TYPEC_WAIT_PS_SRC_VSAFE0V)
+ typec_handle_src_reach_vsafe0v(tcpc_dev);
+
+ return 0;
+}
+
+/*
+ * [BLOCK] TCPCI TypeC I/F
+ */
+
+static const char *const typec_role_name[] = {
+ "UNKNOWN",
+ "SNK",
+ "SRC",
+ "DRP",
+ "TrySRC",
+ "TrySNK",
+};
+
+#ifndef CONFIG_USB_POWER_DELIVERY
+int tcpc_typec_swap_role(struct tcpc_device *tcpc_dev)
+{
+ if (tcpc_dev->typec_role < TYPEC_ROLE_DRP)
+ return -1;
+ TYPEC_INFO("%s\r\n", __func__);
+
+ switch (tcpc_dev->typec_state) {
+ case typec_attached_src:
+#ifdef CONFIG_TYPEC_CAP_TRY_SOURCE
+ typec_trywait_snk_pe_entry(tcpc_dev);
+#else
+ TYPEC_INFO("SRC->SNK (X)\r\n");
+#endif /* CONFIG_TYPEC_CAP_TRY_SOURCE */
+ break;
+ case typec_attached_snk:
+#ifdef CONFIG_TYPEC_CAP_TRY_SINK
+ typec_trywait_src_pe_entry(tcpc_dev);
+#else
+ TYPEC_INFO("SNK->SRC (X)\r\n");
+#endif /* CONFIG_TYPEC_CAP_TRY_SINK */
+ break;
+ }
+ return typec_alert_attach_state_change(tcpc_dev);
+}
+#endif /* ifndef CONFIG_USB_POWER_DELIVERY */
+
+int tcpc_typec_set_rp_level(struct tcpc_device *tcpc_dev, u8 res)
+{
+ switch (res) {
+ case TYPEC_CC_RP_DFT:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
+ TYPEC_INFO("TypeC-Rp: %d\r\n", res);
+ tcpc_dev->typec_local_rp_level = res;
+ break;
+
+ default:
+ TYPEC_INFO("TypeC-Unknown-Rp (%d)\r\n", res);
+ return -1;
+ }
+
+#ifdef CONFIG_USB_PD_DBG_ALWAYS_LOCAL_RP
+ tcpci_set_cc(tcpc_dev, tcpc_dev->typec_local_rp_level);
+#else
+ if ((tcpc_dev->typec_attach_old != TYPEC_UNATTACHED) &&
+ (tcpc_dev->typec_attach_new != TYPEC_UNATTACHED)) {
+ return tcpci_set_cc(tcpc_dev, res);
+ }
+#endif
+
+ return 0;
+}
+
+int tcpc_typec_change_role(
+ struct tcpc_device *tcpc_dev, u8 typec_role)
+{
+ u8 local_cc;
+ bool force_unattach = false;
+
+ if (typec_role == TYPEC_ROLE_UNKNOWN ||
+ typec_role >= TYPEC_ROLE_NR) {
+ TYPEC_INFO("Wrong TypeC-Role: %d\r\n", typec_role);
+ return -1;
+ }
+
+ mutex_lock(&tcpc_dev->access_lock);
+
+ tcpc_dev->typec_role = typec_role;
+ TYPEC_INFO("typec_new_role: %s\r\n", typec_role_name[typec_role]);
+
+ local_cc = tcpc_dev->typec_local_cc & 0x07;
+
+ if (typec_role == TYPEC_ROLE_SNK && local_cc == TYPEC_CC_RP)
+ force_unattach = true;
+
+ if (typec_role == TYPEC_ROLE_SRC && local_cc == TYPEC_CC_RD)
+ force_unattach = true;
+
+ if (tcpc_dev->typec_attach_new == TYPEC_UNATTACHED)
+ force_unattach = true;
+
+ if (force_unattach) {
+ TYPEC_DBG("force_unattach\r\n");
+ typec_disable_low_power_mode(tcpc_dev);
+ typec_unattached_entry(tcpc_dev);
+ }
+
+ mutex_unlock(&tcpc_dev->access_lock);
+ return 0;
+}
+
+#ifdef CONFIG_TYPEC_CAP_POWER_OFF_CHARGE
+static int typec_init_power_off_charge(struct tcpc_device *tcpc_dev)
+{
+ int ret = tcpci_get_cc(tcpc_dev);
+
+ if (ret < 0)
+ return ret;
+
+ if (tcpc_dev->typec_role == TYPEC_ROLE_SRC)
+ return 0;
+
+ if (typec_is_cc_open())
+ return 0;
+
+ if (!tcpci_check_vbus_valid(tcpc_dev))
+ return 0;
+
+ TYPEC_DBG("PowerOffCharge\r\n");
+ TYPEC_DBG("init otg host no mache insert.\r\n");
+
+ gpio_hub_power_on();
+ gpio_hub_typec_power_off();
+ hisi_usb_otg_event(CHARGER_DISCONNECT_EVENT);
+ gpio_hub_switch_to_hub();
+ hisi_usb_otg_event(ID_FALL_EVENT);
+ oldstatus = TYPEC_HOST;
+
+ TYPEC_NEW_STATE(typec_unattached_snk);
+ typec_wait_ps_change(tcpc_dev, TYPEC_WAIT_PS_DISABLE);
+
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_OPEN);
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_RD);
+
+ return 1;
+}
+#endif /* CONFIG_TYPEC_CAP_POWER_OFF_CHARGE */
+
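+/*
+ * Bring the port to a known state at init time: record the role, clear
+ * the cached remote CC, optionally handle power-off charging, force CC
+ * open for 50 ms when acting as DRP with nothing attached, then enter
+ * the Unattached state.
+ */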
+int tcpc_typec_init(struct tcpc_device *tcpc_dev, u8 typec_role)
+{
+ int ret;
+
+ if (typec_role >= TYPEC_ROLE_NR) {
+ TYPEC_INFO("Wrong TypeC-Role: %d\r\n", typec_role);
+ return -2;
+ }
+
+ TYPEC_INFO("typec_init: %s\r\n", typec_role_name[typec_role]);
+
+ tcpc_dev->typec_role = typec_role;
+ tcpc_dev->typec_attach_new = TYPEC_UNATTACHED;
+ tcpc_dev->typec_attach_old = TYPEC_UNATTACHED;
+
+ tcpc_dev->typec_remote_cc[0] = TYPEC_CC_VOLT_OPEN;
+ tcpc_dev->typec_remote_cc[1] = TYPEC_CC_VOLT_OPEN;
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ tcpc_dev->typec_legacy_cable = false;
+ tcpc_dev->typec_legacy_cable_suspect = 0;
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+#ifdef CONFIG_TYPEC_CAP_POWER_OFF_CHARGE
+ ret = typec_init_power_off_charge(tcpc_dev);
+ if (ret != 0)
+ return ret;
+#endif /* CONFIG_TYPEC_CAP_POWER_OFF_CHARGE */
+
+ if (typec_role >= TYPEC_ROLE_DRP) {
+ tcpci_get_cc(tcpc_dev);
+ if (tcpc_dev->typec_remote_cc[0] == TYPEC_CC_VOLT_OPEN &&
+ tcpc_dev->typec_remote_cc[1] == TYPEC_CC_VOLT_OPEN) {
+ tcpci_set_cc(tcpc_dev, TYPEC_CC_OPEN);
+ mdelay(50);
+ }
+ }
+
+#ifdef CONFIG_TYPEC_POWER_CTRL_INIT
+ tcpc_dev->typec_power_ctrl = true;
+#endif /* CONFIG_TYPEC_POWER_CTRL_INIT */
+
+ typec_unattached_entry(tcpc_dev);
+ return 0;
+}
+
+void tcpc_typec_deinit(struct tcpc_device *tcpc_dev)
+{
+}
diff --git a/drivers/usb/pd/richtek/tcpm.c b/drivers/usb/pd/richtek/tcpm.c
new file mode 100644
index 000000000000..01a17a356a1c
--- /dev/null
+++ b/drivers/usb/pd/richtek/tcpm.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Power Delivery Manager Driver
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hisi/usb/pd/richtek/tcpm.h>
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+#include <linux/hisi/usb/pd/richtek/pd_dpm_core.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_typec.h>
+
+/* Inquire TCPC status */
+
+int tcpm_inquire_remote_cc(struct tcpc_device *tcpc_dev,
+ u8 *cc1, u8 *cc2, bool from_ic)
+{
+ int rv = 0;
+
+ if (from_ic) {
+ rv = tcpci_get_cc(tcpc_dev);
+ if (rv < 0)
+ return rv;
+ }
+
+ *cc1 = tcpc_dev->typec_remote_cc[0];
+ *cc2 = tcpc_dev->typec_remote_cc[1];
+ return 0;
+}
+
+int tcpm_inquire_vbus_level(
+ struct tcpc_device *tcpc_dev, bool from_ic)
+{
+ int rv = 0;
+ u16 power_status = 0;
+
+ if (from_ic) {
+ rv = tcpci_get_power_status(tcpc_dev, &power_status);
+ if (rv < 0)
+ return rv;
+
+ tcpci_vbus_level_init(tcpc_dev, power_status);
+ }
+
+ return tcpc_dev->vbus_level;
+}
+
+bool tcpm_inquire_cc_polarity(
+ struct tcpc_device *tcpc_dev)
+{
+ return tcpc_dev->typec_polarity;
+}
+
+u8 tcpm_inquire_typec_attach_state(
+ struct tcpc_device *tcpc_dev)
+{
+ return tcpc_dev->typec_attach_new;
+}
+
+u8 tcpm_inquire_typec_role(
+ struct tcpc_device *tcpc_dev)
+{
+ return tcpc_dev->typec_role;
+}
+
+u8 tcpm_inquire_typec_local_rp(
+ struct tcpc_device *tcpc_dev)
+{
+ u8 level;
+
+ switch (tcpc_dev->typec_local_rp_level) {
+ case TYPEC_CC_RP_1_5:
+ level = 1;
+ break;
+
+ case TYPEC_CC_RP_3_0:
+ level = 2;
+ break;
+
+ default:
+ case TYPEC_CC_RP_DFT:
+ level = 0;
+ break;
+ }
+
+ return level;
+}
+
+int tcpm_typec_set_rp_level(
+ struct tcpc_device *tcpc_dev, u8 level)
+{
+ u8 res;
+
+ if (level == 2)
+ res = TYPEC_CC_RP_3_0;
+ else if (level == 1)
+ res = TYPEC_CC_RP_1_5;
+ else
+ res = TYPEC_CC_RP_DFT;
+
+ return tcpc_typec_set_rp_level(tcpc_dev, res);
+}
+
+int tcpm_typec_change_role(
+ struct tcpc_device *tcpc_dev, u8 typec_role)
+{
+ return tcpc_typec_change_role(tcpc_dev, typec_role);
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+bool tcpm_inquire_pd_connected(
+ struct tcpc_device *tcpc_dev)
+{
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ return pd_port->pd_connected;
+}
+
+bool tcpm_inquire_pd_prev_connected(
+ struct tcpc_device *tcpc_dev)
+{
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ return pd_port->pd_prev_connected;
+}
+
+u8 tcpm_inquire_pd_data_role(
+ struct tcpc_device *tcpc_dev)
+{
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ return pd_port->data_role;
+}
+
+u8 tcpm_inquire_pd_power_role(
+ struct tcpc_device *tcpc_dev)
+{
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ return pd_port->power_role;
+}
+
+u8 tcpm_inquire_pd_vconn_role(
+ struct tcpc_device *tcpc_dev)
+{
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ return pd_port->vconn_source;
+}
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+/* Request TCPC to send PD Request */
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+int tcpm_power_role_swap(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_PR_SWAP);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_power_role_swap);
+
+int tcpm_data_role_swap(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_DR_SWAP);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_data_role_swap);
+
+int tcpm_vconn_swap(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_VCONN_SWAP);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_vconn_swap);
+
+int tcpm_goto_min(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_GOTOMIN);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_goto_min);
+
+int tcpm_soft_reset(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_SOFTRESET);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_soft_reset);
+
+int tcpm_hard_reset(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_HARDRESET);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_hard_reset);
+
+int tcpm_get_source_cap(
+ struct tcpc_device *tcpc_dev, struct tcpm_power_cap *cap)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_GET_SOURCE_CAP);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ /* TODO: Finish it later */
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_get_source_cap);
+
+int tcpm_get_sink_cap(
+ struct tcpc_device *tcpc_dev, struct tcpm_power_cap *cap)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_GET_SINK_CAP);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ /* TODO: Finish it later */
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_get_sink_cap);
+
+int tcpm_bist_cm2(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_pd_request_event(pd_port,
+ PD_DPM_PD_REQUEST_BIST_CM2);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ /* TODO: Finish it later */
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_bist_cm2);
+
+int tcpm_request(struct tcpc_device *tcpc_dev, int mv, int ma)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ mutex_lock(&pd_port->pd_lock);
+ ret = pd_dpm_send_request(pd_port, mv, ma);
+ mutex_unlock(&pd_port->pd_lock);
+
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_request);
+
+int tcpm_error_recovery(struct tcpc_device *tcpc_dev)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ ret = pd_put_dpm_event(pd_port, PD_DPM_ERROR_RECOVERY);
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+
+int tcpm_discover_cable(struct tcpc_device *tcpc_dev, u32 *vdos)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ mutex_lock(&pd_port->pd_lock);
+ pd_port->dpm_flags |= DPM_FLAGS_CHECK_CABLE_ID;
+ ret = vdm_put_dpm_discover_cable_event(pd_port);
+ mutex_unlock(&pd_port->pd_lock);
+
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+
+int tcpm_vdm_request_id(struct tcpc_device *tcpc_dev,
+ u8 *cnt, u8 *payload)
+{
+ bool ret;
+ pd_port_t *pd_port = &tcpc_dev->pd_port;
+
+ mutex_lock(&pd_port->pd_lock);
+ ret = vdm_put_dpm_vdm_request_event(
+ pd_port, PD_DPM_VDM_REQUEST_DISCOVER_ID);
+ mutex_unlock(&pd_port->pd_lock);
+
+ if (!ret)
+ return TCPM_ERROR_PUT_EVENT;
+
+ return TCPM_SUCCESS;
+}
+
+int tcpm_notify_vbus_stable(
+ struct tcpc_device *tcpc_dev)
+{
+#if CONFIG_USB_PD_VBUS_STABLE_TOUT
+ tcpc_disable_timer(tcpc_dev, PD_TIMER_VBUS_STABLE);
+#endif
+
+ pd_put_vbus_stable_event(tcpc_dev);
+ return TCPM_SUCCESS;
+}
+EXPORT_SYMBOL(tcpm_notify_vbus_stable);
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3c20af999893..1e31cf0550b5 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -18,6 +18,7 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
+source "drivers/gpu/arm/Kconfig"
source "drivers/gpu/drm/Kconfig"
menu "Frame buffer Devices"
diff --git a/include/dsm/dsm_pub.h b/include/dsm/dsm_pub.h
new file mode 100644
index 000000000000..9a04eae4900f
--- /dev/null
+++ b/include/dsm/dsm_pub.h
@@ -0,0 +1,561 @@
+
+#ifndef _DSM_PUB_H
+#define _DSM_PUB_H
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+
+#define CLIENT_NAME_LEN (32) /* max client name length */
+#define DSM_MAX_DEVICE_NAME_LEN (32) /* max device name length */
+#define DSM_MAX_MODULE_NAME_LEN (4) /* max module name length */
+#define DSM_MAX_IC_NAME_LEN (4) /* max ic name length */
+
+/* dsm error number definitions */
+#define DSM_ERR_NO_ERROR (0)
+#define DSM_ERR_I2C_TIMEOUT (1)
+
+/* pmu irq */
+#define DSM_PMU_IRQ_ERROR_NO (920005000)
+#define DSM_PMU_VSYS_OV_D200UR_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 2)
+#define DSM_PMU_VSYS_UV_D10MR_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 3)
+#define DSM_PMU_VSYS_PWROFF_ABS_2D_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 4)
+#define DSM_PMU_VSYS_PWROFF_DEB_D80MR_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 5)
+#define DSM_PMU_VSYS_THSD_OTMP140_D1MR_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 6)
+#define DSM_PMU_VSYS_THSD_OTMP125_D1MR_ERROR_NO (DSM_PMU_IRQ_ERROR_NO + 7)
+
+/* pmu ocp */
+#define DSM_PMU_OCP_ERROR_NO_BASE (920007000)
+#define DSM_PMU_OCPLDO2_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 0)
+#define DSM_PMU_OCPLDO1_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 1)
+#define DSM_PMU_OCPLDO0_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 2)
+#define DSM_PMU_OCPBUCK4_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 3)
+#define DSM_PMU_OCPBUCK3_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 4)
+#define DSM_PMU_OCPBUCK2_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 5)
+#define DSM_PMU_OCPBUCK1_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 6)
+#define DSM_PMU_OCPBUCK0_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 7)
+#define DSM_PMU_OCPLDO11_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 8)
+#define DSM_PMU_OCPLDO10_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 9)
+#define DSM_PMU_OCPLDO9_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 10)
+#define DSM_PMU_OCPLDO8_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 11)
+#define DSM_PMU_OCPLDO7_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 12)
+#define DSM_PMU_OCPLDO5_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 13)
+#define DSM_PMU_OCPLDO4_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 14)
+#define DSM_PMU_OCPLDO3_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 15)
+#define DSM_PMU_OCPLDO20_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 16)
+#define DSM_PMU_OCPLDO19_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 17)
+#define DSM_PMU_OCPLDO17_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 18)
+#define DSM_PMU_OCPLDO16_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 19)
+#define DSM_PMU_OCPLDO15_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 20)
+#define DSM_PMU_OCPLDO14_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 21)
+#define DSM_PMU_OCPLDO13_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 22)
+#define DSM_PMU_OCPLDO12_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 23)
+#define DSM_PMU_OCPLDO28_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 24)
+#define DSM_PMU_OCPLDO27_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 25)
+#define DSM_PMU_OCPLDO26_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 26)
+#define DSM_PMU_OCPLDO25_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 27)
+#define DSM_PMU_OCPLDO24_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 28)
+#define DSM_PMU_OCPLDO23_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 29)
+#define DSM_PMU_OCPLDO22_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 30)
+#define DSM_PMU_OCPLDO21_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 31)
+#define DSM_PMU_CLASSD_OCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 32)
+#define DSM_PMU_OCPLDO32_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 36)
+#define DSM_PMU_OCPLDO31_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 37)
+#define DSM_PMU_OCPLDO30_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 38)
+#define DSM_PMU_OCPLDO29_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 39)
+#define DSM_PMU_BUCK4_SCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 40)
+#define DSM_PMU_BUCK3_SCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 41)
+#define DSM_PMU_BUCK2_SCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 42)
+#define DSM_PMU_BUCK1_SCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 43)
+#define DSM_PMU_BUCK0_SCP_ERROR_NO (DSM_PMU_OCP_ERROR_NO_BASE + 44)
+
+#define DSM_CHARGER_ERROR_NO (920011000)
+
+/* pmu coul */
+#define DSM_PMU_COUL_ERROR_NO (920006000)
+
+/* battery detect */
+#define DSM_BATTERY_DETECT_ERROR_NO (920002000)
+
+/* battery */
+#define DSM_BATTERY_ERROR_NO_BASE (920001000)
+#define ERROR_BATT_NOT_EXIST (DSM_BATTERY_ERROR_NO_BASE + 3)
+#define ERROR_BATT_TEMP_LOW (DSM_BATTERY_ERROR_NO_BASE + 4)
+#define ERROR_BATT_VOLT_HIGH (DSM_BATTERY_ERROR_NO_BASE + 5)
+#define ERROR_BATT_VOLT_LOW (DSM_BATTERY_ERROR_NO_BASE + 6)
+#define ERROR_BATT_TERMINATE_TOO_EARLY (DSM_BATTERY_ERROR_NO_BASE + 7)
+#define ERROR_BATT_NOT_TERMINATE (DSM_BATTERY_ERROR_NO_BASE + 8)
+#define ERROR_BATT_BAD_CURR_SENSOR (DSM_BATTERY_ERROR_NO_BASE + 9)
+#define ERROR_VBUS_VOLT_HIGH (DSM_BATTERY_ERROR_NO_BASE + 10)
+#define ERROR_WATCHDOG_RESET (DSM_BATTERY_ERROR_NO_BASE + 11)
+#define ERROR_CHARGE_CURR_OVERHIGH (DSM_BATTERY_ERROR_NO_BASE + 12)
+#define ERROR_DISCHARGE_CURR_OVERHIGH (DSM_BATTERY_ERROR_NO_BASE + 13)
+#define ERROR_CHARGE_CURR_ZERO (DSM_BATTERY_ERROR_NO_BASE + 14)
+#define ERROR_CHARGE_TEMP_FAULT (DSM_BATTERY_ERROR_NO_BASE + 15)
+#define ERROR_CHARGE_BATT_TEMP_SHUTDOWN (DSM_BATTERY_ERROR_NO_BASE + 16)
+#define ERROR_CHARGE_BATT_CAPACITY (DSM_BATTERY_ERROR_NO_BASE + 17)
+#define ERROR_CHARGE_CHARGER_TS (DSM_BATTERY_ERROR_NO_BASE + 18)
+#define ERROR_CHARGE_OTG (DSM_BATTERY_ERROR_NO_BASE + 19)
+#define ERROR_FCP_VOL_OVER_HIGH (DSM_BATTERY_ERROR_NO_BASE + 21)
+#define ERROR_FCP_DETECT (DSM_BATTERY_ERROR_NO_BASE + 22)
+#define ERROR_FCP_OUTPUT (DSM_BATTERY_ERROR_NO_BASE + 23)
+#define ERROR_SWITCH_ATTACH (DSM_BATTERY_ERROR_NO_BASE + 24)
+#define ERROR_ADAPTER_OVLT (DSM_BATTERY_ERROR_NO_BASE + 25)
+#define ERROR_ADAPTER_OCCURRENT (DSM_BATTERY_ERROR_NO_BASE + 26)
+#define ERROR_ADAPTER_OTEMP (DSM_BATTERY_ERROR_NO_BASE + 27)
+#define ERROR_SAFE_PLOICY_LEARN (DSM_BATTERY_ERROR_NO_BASE + 28)
+#define ERROR_SAFE_PLOICY_LEARN1 (DSM_BATTERY_ERROR_NO_BASE + 29)
+#define ERROR_SAFE_PLOICY_LEARN2 (DSM_BATTERY_ERROR_NO_BASE + 31)
+#define ERROR_SAFE_PLOICY_LEARN3 (DSM_BATTERY_ERROR_NO_BASE + 32)
+#define ERROR_BOOST_OCP (DSM_BATTERY_ERROR_NO_BASE + 33)
+#define ERROR_CHARGE_I2C_RW (DSM_BATTERY_ERROR_NO_BASE + 34)
+#define ERROR_BATT_SOC_CHANGE_FAST (DSM_BATTERY_ERROR_NO_BASE + 35)
+#define ERROR_CHARGE_VBAT_OVP (DSM_BATTERY_ERROR_NO_BASE + 36)
+/*internal short current detect*/
+#define DSM_BATTERY_ISCD_LEVEL0 (DSM_BATTERY_ERROR_NO_BASE + 48)
+#define DSM_BATTERY_ISCD_LEVEL1 (DSM_BATTERY_ERROR_NO_BASE + 49)
+#define DSM_BATTERY_ISCD_LEVEL2 (DSM_BATTERY_ERROR_NO_BASE + 50)
+#define DSM_BATTERY_ISCD_LEVEL3 (DSM_BATTERY_ERROR_NO_BASE + 51)
+#define DSM_BATTERY_ISCD_LEVEL4 (DSM_BATTERY_ERROR_NO_BASE + 52)
+#define DSM_BATTERY_ISCD_LEVEL5 (DSM_BATTERY_ERROR_NO_BASE + 53)
+#define DSM_BATTERY_ISCD_LEVEL6 (DSM_BATTERY_ERROR_NO_BASE + 54)
+#define DSM_BATTERY_ISCD_LEVEL7 (DSM_BATTERY_ERROR_NO_BASE + 55)
+#define DSM_BATTERY_ISCD_LEVEL8 (DSM_BATTERY_ERROR_NO_BASE + 56)
+
+#define DSM_DIRECT_CHARGE_VOL_ACCURACY (DSM_BATTERY_ERROR_NO_BASE + 60)
+#define DSM_DIRECT_CHARGE_FULL_PATH_RESISTANCE (DSM_BATTERY_ERROR_NO_BASE + 61)
+#define DSM_DIRECT_CHARGE_LOADSWITCH_RESISTANCE (DSM_BATTERY_ERROR_NO_BASE + 62)
+#define DSM_DIRECT_CHARGE_USB_PORT_LEAKAGE_CURRENT (DSM_BATTERY_ERROR_NO_BASE + 63)
+#define DSM_DIRECT_CHARGE_VBUS_OVP (DSM_BATTERY_ERROR_NO_BASE + 64)
+#define DSM_DIRECT_CHARGE_REVERSE_OCP (DSM_BATTERY_ERROR_NO_BASE + 65)
+#define DSM_DIRECT_CHARGE_OTP (DSM_BATTERY_ERROR_NO_BASE + 66)
+#define DSM_DIRECT_CHARGE_INPUT_OCP (DSM_BATTERY_ERROR_NO_BASE + 67)
+#define DSM_DIRECT_CHARGE_ADAPTER_OTP (DSM_BATTERY_ERROR_NO_BASE + 68)
+
+/* SMPL*/
+#define ERROR_NO_SMPL (920003000)
+
+/* uscp */
+#define ERROR_NO_USB_SHORT_PROTECT (920004000)
+#define ERROR_NO_USB_SHORT_PROTECT_NTC (920004001)
+
+/* pd_richtek */
+#define PD_RICHTEK_ERROR_NO_BASE (926010000)
+#define ERROR_RT_DPSTS_BOTH_CONNECTED (PD_RICHTEK_ERROR_NO_BASE + 0)
+#define ERROR_RT_UFP_INVALID (PD_RICHTEK_ERROR_NO_BASE + 1)
+#define ERROR_RT_DFP_INVALID (PD_RICHTEK_ERROR_NO_BASE + 2)
+#define ERROR_RT_OVER_VDO_MAX_SIZE (PD_RICHTEK_ERROR_NO_BASE + 3)
+#define ERROR_RT_PD_MSG_NULL (PD_RICHTEK_ERROR_NO_BASE + 4)
+#define ERROR_RT_DATA_OBJ_NULL (PD_RICHTEK_ERROR_NO_BASE + 5)
+#define ERROR_RT_OVER_VDO_MAX_SVID_SIZE (PD_RICHTEK_ERROR_NO_BASE + 6)
+#define ERROR_RT_PD_NR_PE_STATES (PD_RICHTEK_ERROR_NO_BASE + 7)
+#define ERROR_RT_PD_TIMER_NR (PD_RICHTEK_ERROR_NO_BASE + 8)
+#define ERROR_RT_PD_ALLOC_MSG (PD_RICHTEK_ERROR_NO_BASE + 9)
+#define ERROR_RT_PD_FREE_MSG (PD_RICHTEK_ERROR_NO_BASE + 10)
+#define ERROR_RT_TCPC_DEV_NULL (PD_RICHTEK_ERROR_NO_BASE + 11)
+#define ERROR_RT_DATA_ROLE_ISNOT_PD_ROLE_DFP (PD_RICHTEK_ERROR_NO_BASE + 12)
+#define ERROR_RT_SVID_DATA_NULL (PD_RICHTEK_ERROR_NO_BASE + 13)
+
+/* tp */
+#define DSM_TP_I2C_RW_ERROR_NO (926004000)
+#define DSM_TP_FWUPDATE_ERROR_NO (926004001)
+#define DSM_TP_RAWDATA_ERROR_NO (20002)
+#define DSM_TP_FW_CRC_ERROR_NO (926004003)
+#define DSM_TP_DEV_STATUS_ERROR_NO (926004004)
+#define DSM_TP_CHANGE_REPORT_ERROR_NO (926004006)
+#define DSM_TP_GLOVE_ON_COUNT_ERROR_NO (926004007)
+#define DSM_TP_ATMEL_IN_MOISTURE_ERROR_NO (926004008)
+#define DSM_TP_ATMEL_IN_VNOISE_ERROR_NO (926004009)
+#define DSM_TP_CALIBRATION_CRC_ERROR_NO (926004010)
+#define DSM_TP_FW_CRASH_ERROR_NO (926004011)
+#define DSM_TP_HID_TIMEOUT_ERROR_NO (926004021)
+#define DSM_TP_STARTUP_FAIDED_ERROR_NO (926004022)
+#define DSM_TP_GET_LCD_PANEL_NAME_ERROR_NO (926004023)
+#define DSM_TP_NO_IRQ_ERROR_NO (20002)
+#define DSM_TP_GHOST_TOUCH_ERROR_NO (926004024)
+
+/* lcd */
+#define DSM_LCD_LDI_UNDERFLOW_NO (922001000)
+#define DSM_LCD_LDI_UNDERFLOW_ERROR_NO (922001000)
+#define DSM_LCD_TE_TIME_OUT_ERROR_NO (922001001)
+#define DSM_LCD_STATUS_ERROR_NO (922001002)
+#define DSM_LCD_POWER_STATUS_ERROR_NO (922001003)
+#define DSM_LCD_PWM_ERROR_NO (922001004)
+#define DSM_LCD_BRIGHTNESS_ERROR_NO (922001005)
+#define DSM_LCD_ESD_RECOVERY_NO (922001006)
+#define DSM_LCD_ESD_OCP_RECOVERY_NO (922001007)
+#define DSM_LCD_OVP_ERROR_NO (922001008)
+#define DSM_LCD_OVP_COMFORT_MODE_NO (922001009)
+#define DSM_LCD_FB0_CLOSE_ERROR_NO (922001032)
+
+#define DSM_SOC_HIFI_RESET (921001000)
+#define DSM_CODEC_HIFI_RESET (921001001)
+#define DSM_CODEC_HIFI_IF_OPEN_ERR (921001002)
+#define DSM_SOC_HIFI_HIGH_CPU (921001003)
+
+#define DSM_HI6402_PLL_PD (921001004)
+#define DSM_HI6402_PLL_UNLOCK (921001005)
+#define DSM_HI6402_PLL_CANNOT_LOCK (921001006)
+#define DSM_HI6402_SLIMBUS_READ_ERR (921001007)
+#define DSM_HI6402_SLIMBUS_LOST_MS (921001008)
+#define DSM_HI6402_MBHC_HS_ERR_TYPE (921001009)
+#define DSM_SOC_HIFI_UPDATE_BUFF_DELAY (921001024)
+
+/* anc_hs */
+#define ANC_HS_OPEN_BOOST_ERR (921002000)
+#define ANC_HS_CANCEL_WORK_BLOCK (921002001)
+#define ANC_HS_CLOSE_BOOST_ERR (921002002)
+#define ANC_HS_ADCH_READ_ERR (921002003)
+#define ANC_HS_ADC_FULL_ERR (921002004)
+#define ANC_HS_MIC_WITH_GPIO_ERR (921002005)
+#define ANC_HS_QUEUE_WORK_ERR (921002006)
+#define ANC_HS_BTN_NOT_IN_RANGE (921002007)
+
+/*smartpa*/
+#define DSM_SMARTPA_I2C_ERR (921003000)
+
+/* sdio */
+#define DSM_SDIO_RW_ERROR_NO (925002000)
+#define DSM_SDIO_DCM_INIT_ERROR_NO (925002001)
+#define DSM_SDIO_HARDWARE_LOCK_NO (925002002)
+#define DSM_SDIO_RW_TIMEOUT_NO (925002003)
+#define DSM_SDIO_ATTACH_ERR_NO (925002004)
+#define DSM_SDIO_CDM52_INVELADE_ARGUMENT_ERR_NO (925002005)
+#define DSM_SDIO_CDM52_R5_ERR_NO (925002006)
+#define DSM_SDIO_CDM52_R5_FUNCTION_NUMBER_ERR_NO (925002007)
+#define DSM_SDIO_CDM52_R5_OUT_OF_RANGE_ERR_NO (925002008)
+#define DSM_SDIO_CDM52__WAIT_FOR_CMD_ERR_NO (925002009)
+#define DSM_SDIO_CMD53_INVELADE_ARGUMENT_ERR_NO (925002010)
+#define DSM_SDIO_CMD53_ALLOC_TABLE_ERR_NO (925002011)
+#define DSM_SDIO_CMD53_CMD_ERR_NO (925002012)
+#define DSM_SDIO_CMD53_DATA_ERR_NO (925002013)
+#define DSM_SDIO_CMD53_R5_ERR_NO (925002014)
+#define DSM_SDIO_CMD53_R5_FUNCTION_NUMBER_ERR_NO (925002015)
+#define DSM_SDIO_CMD53_R5_OUT_OF_RANGE_ERR_NO (925002016)
+#define DSM_SIDO_UNKOWN_ERR_NO (925002017)
+
+/* sensors */
+#define DSM_SHB_ERR_IOM7_CFG_DATA (926005000)
+#define DSM_SHB_ERR_IOM7_CMD (926005001)
+#define DSM_SHB_ERR_IOM7_DYNLOAD (926005002)
+#define DSM_SHB_ERR_IOM7_I2C_DBG (926005003)
+#define DSM_SHB_ERR_IOM7_IPC_TIMEOUT (926005004)
+#define DSM_SHB_ERR_IOM7_OTHER (926005005)
+#define DSM_SHB_ERR_IOM7_READ (926005006)
+#define DSM_SHB_ERR_IOM7_STEP (926005007)
+#define DSM_SHB_ERR_IOM7_WDG (926005008)
+#define DSM_SHB_ERR_IOM7_WRITE (926005009)
+
+#define DSM_SHB_ERR_MCU_ACCEL (926006000)
+#define DSM_SHB_ERR_MCU_AIRPRESS (926006001)
+#define DSM_SHB_ERR_MCU_ALS (926006002)
+#define DSM_SHB_ERR_MCU_FUSION (926006003)
+#define DSM_SHB_ERR_MCU_GYRO (926006004)
+#define DSM_SHB_ERR_MCU_I2C_ERR (926006005)
+#define DSM_SHB_ERR_MCU_LABC (926006006)
+#define DSM_SHB_ERR_MCU_LIGHT (926006007)
+#define DSM_SHB_ERR_MCU_MAG (926006008)
+#define DSM_SHB_ERR_MCU_MOTION (926006009)
+#define DSM_SHB_ERR_MCU_OTHER (926006010)
+#define DSM_SHB_ERR_MCU_PDR (926006011)
+#define DSM_SHB_ERR_MCU_PEDOMETER (926006012)
+#define DSM_SHB_ERR_MCU_PS (926006013)
+#define DSM_SHB_ERR_MCU_SYS_MAIN_INIT (926006100)
+#define DSM_SHB_ERR_MCU_SYS_SERVER_INIT (926006101)
+#define DSM_SHB_ERR_MCU_TIMER_TIMEOUT (926006102)
+#define DSM_SHB_ERR_MCU_ACC_PERIOD_ERROR (926006103)
+#define DSM_SHB_ERR_MCU_CA (926006104)
+#define DSM_SHB_ERR_SYSCOUNT (926006105)
+
+#define DSM_SHB_ERR_GSENSOR_I2C_ERR (926007000)
+#define DSM_SHB_ERR_GSENSOR_DATA_ABNORMAL (926007001)
+#define DSM_SHB_ERR_GSENSOR_DATA_ALL_ZERO (926007002)
+#define DSM_SHB_ERR_GSENSOR_DATA_NOT_UPDATE (926007003)
+#define DSM_SHB_ERR_LIGHT_I2C_ERR (926007004)
+#define DSM_SHB_ERR_LIGHT_IRQ_ERR (926007005)
+#define DSM_SHB_ERR_LIGHT_THRESHOLD_ERR (926007006)
+#define DSM_SHB_ERR_LIGHT_NO_IRQ (926007007)
+#define DSM_SHB_ERR_LIGHT_ENABLE_ERR (926007008)
+#define DSM_SHB_ERR_LIGHT_THRESHOLD_OUT (926007009)
+#define DSM_SHB_ERR_LIGHT_POWER_ERR (926007010)
+#define DSM_SHB_ERR_MAG_I2C_READ (926007011)
+#define DSM_SHB_ERR_MAG_DATA_ABNORAML (926007012)
+#define DSM_SHB_ERR_MAG_DATA_NOT_UPDATE (926007013)
+#define DSM_SHB_ERR_MAG_SELFTEST_ERR (926007014)
+#define DSM_SHB_ERR_SENSORSERVICE_EXIT_ERR (926007015)
+#define DSM_SHB_ERR_SENSORSERVICE_DATA_UPDATE (926007016)
+
+/* key */
+#define DSM_KEY_ERROR_NO (926003000)
+#define DSM_POWER_KEY_ERROR_NO (926003001)
+
+/* hall */
+#define DSM_HALL_ERROR_NO (926002000)
+
+/* camera flash */
+#define DSM_FLASH_I2C_ERROR_NO (927002000)
+#define DSM_FLASH_OPEN_SHOTR_ERROR_NO (927002001)
+#define DSM_FLASH_HOT_DIE_ERROR_NO (927002002)
+
+/* camera hal flash*/
+#define DSM_DALLAS_FLASH_SCHARGER_ERROR_NO (927003018)
+
+/* ext isp */
+#define DSM_EXTISP_LOAD_FW_ERROR_NO (927007000)
+#define DSM_EXTISP_PQ_ERROR_NO (927007001)
+#define DSM_EXTISP_I2C_ERROR_NO (927007002)
+#define DSM_EXTISP_USPI_ERROR_NO (927007003)
+#define DSM_EXTISP_VCM_ERROR_NO (927007004)
+#define DSM_EXTISP_INTERRUPT_ERROR_NO (927007005)
+#define DSM_EXTISP_COMMONFUNC_ERROR_NO (927007006)
+#define DSM_EXTISP_A3_ERROR_NO (927007007)
+#define DSM_EXTISP_PIPE_ERROR_NO (927007008)
+#define DSM_EXTISP_RDN_ERROR_NO (927007009)
+#define DSM_EXTISP_TXLM_ERROR_NO (927007010)
+#define DSM_EXTISP_MTX_ERROR_NO (927007011)
+#define DSM_EXTISP_MRX_ERROR_NO (927007012)
+#define DSM_EXTISP_FEC0_ERROR_NO (927007013)
+#define DSM_EXTISP_SENSOR_ERROR_NO (927007014)
+#define DSM_EXTISP_FEC1_ERROR_NO (927007015)
+#define DSM_EXTISP_CAP_ERROR_NO (927007016)
+#define DSM_EXTISP_OTHER_ERROR_NO (927007017)
+
+/* camera csi */
+#define DSM_CSI_MIPI0_ERROR_NO (927001000)
+#define DSM_CSI_MIPI1_ERROR_NO (927001001)
+
+/* camera pmic */
+#define DSM_CAMPMIC_I2C_ERROR_NO (927010000)
+#define DSM_CAMPMIC_ENABLE_ERROR_NO (927010001)
+#define DSM_CAMPMIC_OVER_CURRENT_ERROR_NO (927010002)
+#define DSM_CAMPMIC_TSD_ERROR_NO (927010003)
+#define DSM_CAMPMIC_UNDER_VOLTAGE_ERROR_NO (927010004)
+
+/* ov isp */
+#define DSM_ISP22_MCU_NO_RSP_ERROR_NO (927004000)
+#define DSM_ISP22_CMD_SET_ERROR_NO (927004001)
+#define DSM_ISP22_SENSOR_PRIMARY_ERROR_NO (927004002)
+#define DSM_ISP22_SENSOR_SECONDARY_ERROR_NO (927004003)
+#define DSM_ISP22_FLASH_ERROR_NO (927004004)
+
+/* ivp */
+#define DSM_IVP_SMMU_ERROR_NO (927005000)
+#define DSM_IVP_WATCH_ERROR_NO (927005001)
+#define DSM_IVP_DWAXI_ERROR_NO (927005002)
+#define DSM_IVP_OPEN_ERROR_NO (927005003)
+#define DSM_IVP_MESSAGE_ERROR_NO (927005004)
+#define DSM_IVP_PANIC_ERROR_NO (927005005)
+
+/* wifi */
+#define DSM_WIFI_MODULE_INIT_ERROR (909030001)
+#define DSM_WIFI_KSO_ERROR (909030002)
+#define DSM_WIFI_CMD52_ERROR (909030003)
+#define DSM_WIFI_CMD53_ERROR (909030004)
+/*wifi open*/
+#define DSM_WIFI_SDIO_RESET_COMM_ERROR (909030005)
+#define DSM_WIFI_SDIO_PROBE_ATTACH_ERROR (909030006)
+#define DSM_WIFI_SDIO_FIRMWARE_DL_ERROR (909030007)
+#define DSM_WIFI_DHD_DEV_INIT_IOCTL_ERROR (909030008)
+/*wifi scan*/
+#define DSM_WIFI_WLC_SET_PASSIVE_SCAN_ERROR (909030009)
+#define DSM_WIFI_WLC_SCAN_ERROR (909030010)
+#define DSM_WIFI_WLC_SET_SCANSUPPRESS_ERROR (909030011)
+#define DSM_WIFI_WLC_GET_CHANNEL_ERROR (909030012)
+#define DSM_WIFI_WLC_SCAN_RESULTS_ERROR (909030013)
+/*wifi connect*/
+#define DSM_WIFI_WLC_DISASSOC_ERROR (909030014)
+#define DSM_WIFI_WLC_SET_SSID_ERROR (909030015)
+#define DSM_WIFI_SET_CIPHER_ERROR (909030016)
+#define DSM_WIFI_SET_KEY_MGMT_ERROR (909030017)
+#define DSM_WIFI_SET_SHAREDKEY_ERROR (909030018)
+#define DSM_WIFI_OPEN_ERROR (909030019)
+
+#define DSM_WIFI_RESERVED (20701)
+
+/* recovery */
+#define DSM_RECOVERY_ERROR_NO (924001000)
+
+/* selinux */
+#define DSM_SELINUX_ERROR_NO (924002000)
+
+/* modem spi */
+#define DSM_SPI_WRITE_ACK_ERROR_NO (926009000)
+#define DSM_SPI_READ_ACK_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO+1)
+#define DSM_SPI_80_ACK_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO+2)
+#define DSM_SPI_SYNC_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 3)
+#define DSM_SPI_DOWN_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 4)
+#define DSM_SPI_PORT_BUSY_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 5)
+#define DSM_SPI_DATA_READ_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 6)
+#define DSM_SPI_DATA_DROP_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 7)
+#define DSM_SPI_AUTO_READY_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 8)
+#define DSM_SPI_KZALLOC_FAILED_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 9)
+#define DSM_SPI_ALLOC_SKB_FAILED_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 10)
+#define DSM_SPI_DOWN_RETRY_MAX_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 11)
+#define DSM_SPI_SYNC_ERR_MAX_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 12)
+#define DSM_APCP_SDIO_WRITE_ACK_ERROR_NO (DSM_SPI_WRITE_ACK_ERROR_NO + 13)
+
+/* spi */
+#define DSM_SPI_ERROR_NO (925001000) /* spi_sync timeout error */
+#define DSM_SPI_INVALID_ARGUMENT_ERR_NO (925001001)
+#define DSM_SPI_SHUTDOWN_ERR_NO (925001002)
+#define DSM_SPI_UNKOWN_ERR_NO (925001003)
+#define DSM_SPI_SG_ALLOC_RX_PAGE_INVILAD_LEN (925001004)
+#define DSM_SPI_SG_ALLOC_TX_PAGE_INVILAD_LEN (925001005)
+#define DSM_SPI_RX_FIFO_NOT_EMPTY (925001006)
+#define DSM_SPI_SPI_IS_BUSY (925001007)
+
+/* fs & ext4 */
+#define DSM_FS_EXT4_ERROR (928003000)
+#define DSM_FS_EXT4_ERROR_INODE (928003001)
+#define DSM_FS_EXT4_ERROR_FILE (928003002)
+#define DSM_FS_EXT4_ERROR_READ_SUPER (928003003)
+#define DSM_FS_EXT4_ERROR_READ_SUPER_SECOND (928003004)
+#define DSM_FS_EXT4_ERROR_WRITE_SUPER (928003005)
+
+/* f2fs */
+#define DSM_F2FS_ERROR_MSG (928005000)
+#define DSM_F2FS_SUPER_NEED_FSCK (928005001)
+#define DSM_F2FS_BUGCHK_NEED_FSCK (928005002)
+
+#define DSM_UART_OPEN_HWINIT_ERROR (22100)
+#define DSM_UART_OPEN_IRQ_REQUSET_FAILED (DSM_UART_OPEN_HWINIT_ERROR+1)
+#define DSM_UART_TX_DMA_MAP_FAILED (DSM_UART_OPEN_HWINIT_ERROR+2)
+#define DSM_UART_TX_DMA_BUSY_ERROR (DSM_UART_OPEN_HWINIT_ERROR+3)
+#define DSM_UART_RX_DMA_READ_BUFFER_FULL_ERROR (DSM_UART_OPEN_HWINIT_ERROR+4)
+#define DSM_UART_CONTROLLER_FIFO_OVERRUN_ERROR (DSM_UART_OPEN_HWINIT_ERROR+5)
+#define DSM_UART_CONTROLLER_LINE_BREAK_ERROR (DSM_UART_OPEN_HWINIT_ERROR+6)
+#define DSM_UART_CONTROLLER_PARITY_ERROR (DSM_UART_OPEN_HWINIT_ERROR+7)
+#define DSM_UART_CONTROLLER_FRAME_ERROR (DSM_UART_OPEN_HWINIT_ERROR+8)
+
+/* hwtc */
+#define DSM_HWTC_ERROR_NO (927008000)
+
+/* stat mm */
+#define DSM_MM_STAT (924003000)
+
+/* nfc */
+#define DSM_NFC_I2C_WRITE_ERROR_NO (923002000)
+#define DSM_NFC_I2C_READ_ERROR_NO (923002001)
+#define DSM_NFC_CLK_ENABLE_ERROR_NO (923002002)
+#define DSM_NFC_I2C_WRITE_EOPNOTSUPP_ERROR_NO (923002003)
+#define DSM_NFC_I2C_READ_EOPNOTSUPP_ERROR_NO (923002004)
+#define DSM_NFC_I2C_WRITE_EREMOTEIO_ERROR_NO (923002005)
+#define DSM_NFC_I2C_READ_EREMOTEIO_ERROR_NO (923002006)
+#define DSM_NFC_RD_I2C_WRITE_ERROR_NO (923002007)
+#define DSM_NFC_RD_I2C_READ_ERROR_NO (923002008)
+#define DSM_NFC_RD_I2C_WRITE_EOPNOTSUPP_ERROR_NO (923002009)
+#define DSM_NFC_RD_I2C_READ_EOPNOTSUPP_ERROR_NO (923002010)
+#define DSM_NFC_RD_I2C_WRITE_EREMOTEIO_ERROR_NO (923002011)
+#define DSM_NFC_RD_I2C_READ_EREMOTEIO_ERROR_NO (923002012)
+#define DSM_NFC_SIM_CHECK_ERROR_NO (923002013)
+#define DSM_NFC_HAL_CORE_RESET (923002014)
+#define DSM_NFC_HISEE_COS_IMAGE_UPGRADE_ERROR_NO (923002015)
+#define DSM_NFC_HISEE_POWER_ON_OFF_ERROR_NO (923002016)
+#define DSM_NFC_HISEE_APDU_COMMAND_OPERATION_ERROR_NO (923002017)
+
+/* fingerprint */
+#define DSM_FINGERPRINT_WAIT_FOR_FINGER_ERROR_NO (912001000)
+#define DSM_FINGERPRINT_CAPTURE_IMAGE_ERROR_NO (912001001)
+#define DSM_FINGERPRINT_IDENTIFY_ERROR_NO (912001002)
+#define DSM_FINGERPRINT_TEST_DEADPIXELS_ERROR_NO (912001003)
+#define DSM_FINGERPRINT_ENROLL_ERROR_NO (912001004)
+#define DSM_FINGERPRINT_REMOVE_TEMPLATE_ERROR_NO (912001005)
+#define DSM_FINGERPRINT_ENUMERATE_ERROR_NO (912001006)
+#define DSM_FINGERPRINT_MODULE_OPEN_ERROR_NO (912001007)
+#define DSM_FINGERPRINT_PROBE_FAIL_ERROR_NO (912001008)
+#define DSM_FINGERPRINT_DIFF_DEADPIXELS_ERROR_NO (912001009)
+#define DSM_FINGERPRINT_MANY_DEADPIXELS_ERROR_NO (912001010)
+#define DSM_FINGERPRINT_DB_FILE_LOST_ERROR_NO (912001011)
+
+/* DM */
+#define DSM_DM_VERITY_ERROR_NO (928001000)
+#define DSM_DM_VERITY_FEC_INFO_NO (928001001)
+#define DSM_DM_VERITY_CE_ERROR_NO (928001002)
+/*cpu_buck*/
+#define ERROR_NO_CPU_BUCK_BASE (920012000)
+
+struct dsm_client_ops {
+ int (*poll_state) (void);
+ int (*dump_func) (int type, void *buff, int size);
+};
+
+struct dsm_dev {
+ const char *name;
+ const char *device_name;
+ const char *ic_name;
+ const char *module_name;
+ struct dsm_client_ops *fops;
+ size_t buff_size;
+};
+
+struct dsm_client {
+ char *client_name;
+ char *device_name;
+ char *ic_name;
+ char *module_name;
+ int client_id;
+ int error_no;
+ unsigned long buff_flag;
+ struct dsm_client_ops *cops;
+ wait_queue_head_t waitq;
+ size_t read_size;
+ size_t used_size;
+ size_t buff_size;
+ u8 dump_buff[];
+};
+
+/*
+ * For userspace clients, such as the sensor service, refer to this structure.
+ */
+struct dsm_extern_client {
+ char client_name[CLIENT_NAME_LEN];
+ int buf_size;
+};
+
+#ifdef CONFIG_HUAWEI_DSM
+struct dsm_client *dsm_register_client(struct dsm_dev *dev);
+struct dsm_client *dsm_find_client(char *dsm_name);
+int dsm_client_ocuppy(struct dsm_client *client);
+int dsm_client_unocuppy(struct dsm_client *client);
+int dsm_client_record(struct dsm_client *client, const char *fmt, ...);
+int dsm_client_copy(struct dsm_client *client, void *src, int sz);
+void dsm_client_notify(struct dsm_client *client, int error_no);
+#else
+static inline struct dsm_client *dsm_register_client(struct dsm_dev *dev)
+{
+ return NULL;
+}
+
+static inline struct dsm_client *dsm_find_client(char *dsm_name)
+{
+ return NULL;
+}
+
+static inline int dsm_client_ocuppy(struct dsm_client *client)
+{
+ return 1;
+}
+
+static inline int dsm_client_unocuppy(struct dsm_client *client)
+{
+ return 0;
+}
+
+static inline int dsm_client_record(struct dsm_client *client, const char *fmt,
+ ...)
+{
+ return 0;
+}
+
+static inline int dsm_client_copy(struct dsm_client *client, void *src, int sz)
+{
+ return 0;
+}
+
+static inline void dsm_client_notify(struct dsm_client *client, int error_no)
+{
+ return;
+}
+#endif
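+
+/*
+ * Illustrative sketch (not part of the original patch): a typical kernel
+ * client of this interface, based only on the declarations above. The
+ * "dsm_example" name, the buffer size, and the assumption that
+ * dsm_client_ocuppy() returns 0 when the record buffer is free are
+ * illustrative only.
+ *
+ *   static struct dsm_dev example_dsm_dev = {
+ *     .name = "dsm_example",
+ *     .buff_size = 1024,
+ *   };
+ *   static struct dsm_client *example_client;
+ *
+ *   example_client = dsm_register_client(&example_dsm_dev);
+ *   if (example_client && !dsm_client_ocuppy(example_client)) {
+ *     dsm_client_record(example_client, "i2c timeout\n");
+ *     dsm_client_notify(example_client, DSM_ERR_I2C_TIMEOUT);
+ *   }
+ */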
+
+#endif
diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h
new file mode 100644
index 000000000000..57a8c5bf5537
--- /dev/null
+++ b/include/dt-bindings/clock/hi3660-clock.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DTS_HI3660_CLOCK_H
+#define __DTS_HI3660_CLOCK_H
+
+/* fixed rate clocks */
+#define HI3660_CLKIN_SYS 0
+#define HI3660_CLKIN_REF 1
+#define HI3660_CLK_FLL_SRC 2
+#define HI3660_CLK_PPLL0 3
+#define HI3660_CLK_PPLL1 4
+#define HI3660_CLK_PPLL2 5
+#define HI3660_CLK_PPLL3 6
+#define HI3660_CLK_SCPLL 7
+#define HI3660_PCLK 8
+#define HI3660_CLK_UART0_DBG 9
+#define HI3660_CLK_UART6 10
+#define HI3660_OSC32K 11
+#define HI3660_OSC19M 12
+#define HI3660_CLK_480M 13
+#define HI3660_CLK_INV 14
+
+/* clk in crgctrl */
+#define HI3660_FACTOR_UART3 15
+#define HI3660_CLK_FACTOR_MMC 16
+#define HI3660_CLK_GATE_I2C0 17
+#define HI3660_CLK_GATE_I2C1 18
+#define HI3660_CLK_GATE_I2C2 19
+#define HI3660_CLK_GATE_I2C6 20
+#define HI3660_CLK_DIV_SYSBUS 21
+#define HI3660_CLK_DIV_320M 22
+#define HI3660_CLK_DIV_A53 23
+#define HI3660_CLK_GATE_SPI0 24
+#define HI3660_CLK_GATE_SPI2 25
+#define HI3660_PCIEPHY_REF 26
+#define HI3660_CLK_ABB_USB 27
+#define HI3660_HCLK_GATE_SDIO0 28
+#define HI3660_HCLK_GATE_SD 29
+#define HI3660_CLK_GATE_AOMM 30
+#define HI3660_PCLK_GPIO0 31
+#define HI3660_PCLK_GPIO1 32
+#define HI3660_PCLK_GPIO2 33
+#define HI3660_PCLK_GPIO3 34
+#define HI3660_PCLK_GPIO4 35
+#define HI3660_PCLK_GPIO5 36
+#define HI3660_PCLK_GPIO6 37
+#define HI3660_PCLK_GPIO7 38
+#define HI3660_PCLK_GPIO8 39
+#define HI3660_PCLK_GPIO9 40
+#define HI3660_PCLK_GPIO10 41
+#define HI3660_PCLK_GPIO11 42
+#define HI3660_PCLK_GPIO12 43
+#define HI3660_PCLK_GPIO13 44
+#define HI3660_PCLK_GPIO14 45
+#define HI3660_PCLK_GPIO15 46
+#define HI3660_PCLK_GPIO16 47
+#define HI3660_PCLK_GPIO17 48
+#define HI3660_PCLK_GPIO18 49
+#define HI3660_PCLK_GPIO19 50
+#define HI3660_PCLK_GPIO20 51
+#define HI3660_PCLK_GPIO21 52
+#define HI3660_CLK_GATE_SPI3 53
+#define HI3660_CLK_GATE_I2C7 54
+#define HI3660_CLK_GATE_I2C3 55
+#define HI3660_CLK_GATE_SPI1 56
+#define HI3660_CLK_GATE_UART1 57
+#define HI3660_CLK_GATE_UART2 58
+#define HI3660_CLK_GATE_UART4 59
+#define HI3660_CLK_GATE_UART5 60
+#define HI3660_CLK_GATE_I2C4 61
+#define HI3660_CLK_GATE_DMAC 62
+#define HI3660_PCLK_GATE_DSS 63
+#define HI3660_ACLK_GATE_DSS 64
+#define HI3660_CLK_GATE_LDI1 65
+#define HI3660_CLK_GATE_LDI0 66
+#define HI3660_CLK_GATE_VIVOBUS 67
+#define HI3660_CLK_GATE_EDC0 68
+#define HI3660_CLK_GATE_TXDPHY0_CFG 69
+#define HI3660_CLK_GATE_TXDPHY0_REF 70
+#define HI3660_CLK_GATE_TXDPHY1_CFG 71
+#define HI3660_CLK_GATE_TXDPHY1_REF 72
+#define HI3660_ACLK_GATE_USB3OTG 73
+#define HI3660_CLK_GATE_SPI4 74
+#define HI3660_CLK_GATE_SD 75
+#define HI3660_CLK_GATE_SDIO0 76
+#define HI3660_CLK_GATE_UFS_SUBSYS 77
+#define HI3660_PCLK_GATE_DSI0 78
+#define HI3660_PCLK_GATE_DSI1 79
+#define HI3660_ACLK_GATE_PCIE 80
+#define HI3660_PCLK_GATE_PCIE_SYS 81
+#define HI3660_CLK_GATE_PCIEAUX 82
+#define HI3660_PCLK_GATE_PCIE_PHY 83
+#define HI3660_CLK_ANDGT_LDI0 84
+#define HI3660_CLK_ANDGT_LDI1 85
+#define HI3660_CLK_ANDGT_EDC0 86
+#define HI3660_CLK_GATE_UFSPHY_GT 87
+#define HI3660_CLK_ANDGT_MMC 88
+#define HI3660_CLK_ANDGT_SD 89
+#define HI3660_CLK_A53HPM_ANDGT 90
+#define HI3660_CLK_ANDGT_SDIO 91
+#define HI3660_CLK_ANDGT_UART0 92
+#define HI3660_CLK_ANDGT_UART1 93
+#define HI3660_CLK_ANDGT_UARTH 94
+#define HI3660_CLK_ANDGT_SPI 95
+#define HI3660_CLK_VIVOBUS_ANDGT 96
+#define HI3660_CLK_AOMM_ANDGT 97
+#define HI3660_CLK_320M_PLL_GT 98
+#define HI3660_AUTODIV_EMMC0BUS 99
+#define HI3660_AUTODIV_SYSBUS 100
+#define HI3660_CLK_GATE_UFSPHY_CFG 101
+#define HI3660_CLK_GATE_UFSIO_REF 102
+#define HI3660_CLK_MUX_SYSBUS 103
+#define HI3660_CLK_MUX_UART0 104
+#define HI3660_CLK_MUX_UART1 105
+#define HI3660_CLK_MUX_UARTH 106
+#define HI3660_CLK_MUX_SPI 107
+#define HI3660_CLK_MUX_I2C 108
+#define HI3660_CLK_MUX_MMC_PLL 109
+#define HI3660_CLK_MUX_LDI1 110
+#define HI3660_CLK_MUX_LDI0 111
+#define HI3660_CLK_MUX_SD_PLL 112
+#define HI3660_CLK_MUX_SD_SYS 113
+#define HI3660_CLK_MUX_EDC0 114
+#define HI3660_CLK_MUX_SDIO_SYS 115
+#define HI3660_CLK_MUX_SDIO_PLL 116
+#define HI3660_CLK_MUX_VIVOBUS 117
+#define HI3660_CLK_MUX_A53HPM 118
+#define HI3660_CLK_MUX_320M 119
+#define HI3660_CLK_MUX_IOPERI 120
+#define HI3660_CLK_DIV_UART0 121
+#define HI3660_CLK_DIV_UART1 122
+#define HI3660_CLK_DIV_UARTH 123
+#define HI3660_CLK_DIV_MMC 124
+#define HI3660_CLK_DIV_SD 125
+#define HI3660_CLK_DIV_EDC0 126
+#define HI3660_CLK_DIV_LDI0 127
+#define HI3660_CLK_DIV_SDIO 128
+#define HI3660_CLK_DIV_LDI1 129
+#define HI3660_CLK_DIV_SPI 130
+#define HI3660_CLK_DIV_VIVOBUS 131
+#define HI3660_CLK_DIV_I2C 132
+#define HI3660_CLK_DIV_UFSPHY 133
+#define HI3660_CLK_DIV_CFGBUS 134
+#define HI3660_CLK_DIV_MMC0BUS 135
+#define HI3660_CLK_DIV_MMC1BUS 136
+#define HI3660_CLK_DIV_UFSPERI 137
+#define HI3660_CLK_DIV_AOMM 138
+#define HI3660_CLK_DIV_IOPERI 139
+#define HI3660_VENC_VOLT_HOLD 140
+#define HI3660_PERI_VOLT_HOLD 141
+#define HI3660_CLK_GATE_VENC 142
+#define HI3660_CLK_GATE_VDEC 143
+#define HI3660_CLK_ANDGT_VENC 144
+#define HI3660_CLK_ANDGT_VDEC 145
+#define HI3660_CLK_MUX_VENC 146
+#define HI3660_CLK_MUX_VDEC 147
+#define HI3660_CLK_DIV_VENC 148
+#define HI3660_CLK_DIV_VDEC 149
+#define HI3660_CLK_FAC_ISP_SNCLK 150
+#define HI3660_CLK_GATE_ISP_SNCLK0 151
+#define HI3660_CLK_GATE_ISP_SNCLK1 152
+#define HI3660_CLK_GATE_ISP_SNCLK2 153
+#define HI3660_CLK_ANGT_ISP_SNCLK 154
+#define HI3660_CLK_MUX_ISP_SNCLK 155
+#define HI3660_CLK_DIV_ISP_SNCLK 156
+
+/* clk in pmuctrl */
+#define HI3660_GATE_ABB_192 0
+
+/* clk in pctrl */
+#define HI3660_GATE_UFS_TCXO_EN 0
+#define HI3660_GATE_USB_TCXO_EN 1
+
+/* clk in sctrl */
+#define HI3660_PCLK_AO_GPIO0 0
+#define HI3660_PCLK_AO_GPIO1 1
+#define HI3660_PCLK_AO_GPIO2 2
+#define HI3660_PCLK_AO_GPIO3 3
+#define HI3660_PCLK_AO_GPIO4 4
+#define HI3660_PCLK_AO_GPIO5 5
+#define HI3660_PCLK_AO_GPIO6 6
+#define HI3660_PCLK_GATE_MMBUF 7
+#define HI3660_CLK_GATE_DSS_AXI_MM 8
+#define HI3660_PCLK_MMBUF_ANDGT 9
+#define HI3660_CLK_MMBUF_PLL_ANDGT 10
+#define HI3660_CLK_FLL_MMBUF_ANDGT 11
+#define HI3660_CLK_SYS_MMBUF_ANDGT 12
+#define HI3660_CLK_GATE_PCIEPHY_GT 13
+#define HI3660_ACLK_MUX_MMBUF 14
+#define HI3660_CLK_SW_MMBUF 15
+#define HI3660_CLK_DIV_AOBUS 16
+#define HI3660_PCLK_DIV_MMBUF 17
+#define HI3660_ACLK_DIV_MMBUF 18
+#define HI3660_CLK_DIV_PCIEPHY 19
+
+/* clk in iomcu */
+#define HI3660_CLK_I2C0_IOMCU 0
+#define HI3660_CLK_I2C1_IOMCU 1
+#define HI3660_CLK_I2C2_IOMCU 2
+#define HI3660_CLK_I2C6_IOMCU 3
+#define HI3660_CLK_IOMCU_PERI0 4
+
+/* clk in stub clock */
+#define HI3660_CLK_STUB_CLUSTER0 0
+#define HI3660_CLK_STUB_CLUSTER1 1
+#define HI3660_CLK_STUB_GPU 2
+#define HI3660_CLK_STUB_DDR 3
+#define HI3660_CLK_STUB_DDR_VOTE 4
+#define HI3660_CLK_STUB_DDR_LIMIT 5
+#define HI3660_CLK_STUB_NUM 6
+
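+/*
+ * Illustrative sketch (not part of the original patch): these indices are
+ * meant to be used in device tree clock specifiers. The &crg_ctrl controller
+ * label below is an assumption for the example only.
+ *
+ *   clocks = <&crg_ctrl HI3660_CLK_GATE_UART1>;
+ */
+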
+#endif /* __DTS_HI3660_CLOCK_H */
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 38f1ea879ea1..0359bfdc9119 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -56,4 +56,19 @@
#define DRIVE4_08MA (4 << 4)
#define DRIVE4_10MA (6 << 4)
+/* drive strength definition for hi3660 */
+#define DRIVE6_MASK (15 << 4)
+#define DRIVE6_04MA (0 << 4)
+#define DRIVE6_12MA (4 << 4)
+#define DRIVE6_19MA (8 << 4)
+#define DRIVE6_27MA (10 << 4)
+#define DRIVE6_32MA (15 << 4)
+#define DRIVE7_02MA (0 << 4)
+#define DRIVE7_04MA (1 << 4)
+#define DRIVE7_06MA (2 << 4)
+#define DRIVE7_08MA (3 << 4)
+#define DRIVE7_10MA (4 << 4)
+#define DRIVE7_12MA (5 << 4)
+#define DRIVE7_14MA (6 << 4)
+#define DRIVE7_16MA (7 << 4)
#endif
diff --git a/include/linux/hisi/hisi-iommu.h b/include/linux/hisi/hisi-iommu.h
new file mode 100644
index 000000000000..00dd5e97db59
--- /dev/null
+++ b/include/linux/hisi/hisi-iommu.h
@@ -0,0 +1,13 @@
+#ifndef _HI36XX_SMMU_H
+#define _HI36XX_SMMU_H
+
+#include <linux/types.h>
+struct iommu_domain_data {
+ unsigned int iova_start;
+ unsigned int iova_size;
+ phys_addr_t phy_pgd_base;
+ unsigned long iova_align;
+ struct list_head list;
+};
+
+#endif
diff --git a/include/linux/hisi/hisi_ion.h b/include/linux/hisi/hisi_ion.h
new file mode 100644
index 000000000000..0d7be75f795f
--- /dev/null
+++ b/include/linux/hisi/hisi_ion.h
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_HISI_ION_H
+#define _LINUX_HISI_ION_H
+
+#include <linux/ion.h>
+#include <linux/sizes.h>
+
+/**
+ * These are the only ids that should be used for Ion heaps.
+ * The ids are listed in the order in which allocation will be attempted,
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Ids are spaced on purpose to allow new ids to be inserted in between (for
+ * possible fallbacks).
+ */
+
+enum ion_heap_ids {
+ INVALID_HEAP_ID = -1,
+ ION_SYSTEM_HEAP_ID = 0,
+ ION_SYSTEM_CONTIG_HEAP_ID = 1,
+ ION_GRALLOC_HEAP_ID = 2,
+ ION_DMA_HEAP_ID = 3,
+ ION_DMA_POOL_HEAP_ID = 4,
+ ION_CPU_DRAW_HEAP_ID = 5,
+ ION_CAMERA_HEAP_ID = 6,
+ ION_OVERLAY_HEAP_ID = 7,
+ ION_VCODEC_HEAP_ID = 8,
+ ION_ISP_HEAP_ID = 9,
+ ION_FB_HEAP_ID = 10,
+ ION_VPU_HEAP_ID = 11,
+ ION_JPU_HEAP_ID = 12,
+ HISI_ION_HEAP_IOMMU_ID = 13,
+ ION_MISC_HEAP_ID = 14,
+ ION_DRM_GRALLOC_HEAP_ID = 15,
+ ION_DRM_VCODEC_HEAP_ID = 16,
+ ION_TUI_HEAP_ID = 17,
+ ION_IRIS_HEAP_ID = 18,
+ ION_RESERV2_ID = 19,
+ ION_DRM_HEAP_ID = 20,
+ ION_HEAP_ID_RESERVED = 31, /* Bit reserved */
+};
+
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
+#define ION_8K_ALIGN(len) ALIGN(len, SZ_8K)
+#define IOMMU_PAGE_SIZE SZ_8K
+
+#define ION_VMALLOC_HEAP_NAME "vmalloc"
+#define ION_KMALLOC_HEAP_NAME "kmalloc"
+#define ION_GRALLOC_HEAP_NAME "gralloc"
+
+
+#define ION_SET_CACHED(__cache) (__cache | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache) (__cache & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)
+
+/* struct used to get the physical address of a contiguous heap buffer */
+struct ion_phys_data {
+ int fd_buffer;
+ unsigned int size;
+ union {
+ unsigned int phys;
+ unsigned int phys_l;
+ };
+ unsigned int phys_h;
+};
+
+struct ion_flag_data {
+ int shared_fd;
+ int flags;
+};
+
+struct ion_smart_pool_info_data {
+ int water_mark;
+};
+
+#define HISI_ION_NAME_LEN 16
+
+struct ion_heap_info_data {
+ char name[HISI_ION_NAME_LEN];
+ phys_addr_t heap_phy;
+ unsigned int heap_size;
+};
+
+struct ion_kern_va_data {
+ int handle_id;
+ unsigned int kern_va_h;
+ unsigned int kern_va_l;
+};
+
+struct ion_issupport_iommu_data {
+ int is_support_iommu;
+};
+
+struct ion_flush_data {
+ int fd;
+ void *vaddr;
+ unsigned int offset;
+ unsigned int length;
+};
+
+
+/* user commands added for additional (HiSilicon-specific) ioctl use */
+enum ION_HISI_CUSTOM_CMD {
+ ION_HISI_CUSTOM_PHYS,
+ ION_HISI_CLEAN_CACHES,
+ ION_HISI_INV_CACHES,
+ ION_HISI_CLEAN_INV_CACHES,
+ ION_HISI_CUSTOM_GET_KERN_VA,
+ ION_HISI_CUSTOM_FREE_KERN_VA,
+ ION_HISI_CUSTOM_ISSUPPORT_IOMMU,
+ ION_HISI_CUSTOM_GET_MEDIA_HEAP_MODE,
+ ION_HISI_CUSTOM_SET_FLAG,
+ ION_HISI_CUSTOM_SET_SMART_POOL_INFO,
+};
+
+enum ION_HISI_HEAP_MODE {
+ ION_CARVEROUT_MODE = 0,
+ ION_IOMMU_MODE = 1,
+};
+
+#define TINY_SYSTEM 0x0 /* tiny version system for chip test */
+#define FULL_SYSTEM 0x1 /* full version system */
+/**
+ * hisi_ion_client_create() - create an ion client with the given name
+ * @name: the client name
+ *
+ * Return: the client handle
+ *
+ * This function should be called by higher-level users in the kernel. Before
+ * users can access a buffer, they must get a client by calling this function.
+ */
+struct ion_client *
+hisi_ion_client_create(const char *name);
+int hisi_ion_get_heap_info(unsigned int id, struct ion_heap_info_data *data);
+int hisi_ion_get_media_mode(void);
+unsigned long long get_system_type(void);
+struct ion_device *get_ion_device(void);
+#define ION_IOC_HISI_MAGIC 'H'
+/**
+ * DOC: ION_IOC_FLUSH_ALL_CACHES - flush all of the L1 and L2 caches
+ *
+ * Flush all of the L1 and L2 caches.
+ */
+#define ION_IOC_FLUSH_ALL_CACHES _IOWR(ION_IOC_HISI_MAGIC, 3, \
+ struct ion_flush_data)
+
+#ifdef CONFIG_ION
+extern unsigned long hisi_ion_total(void);
+#else
+static inline unsigned long hisi_ion_total(void)
+{
+ return 0;
+}
+#endif
+
+/* k3: added to calculate free memory */
+void hisi_ionsysinfo(struct sysinfo *si);
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags);
+int hisi_ion_memory_info(bool verbose);
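+
+/*
+ * Illustrative sketch (not part of the original patch): a kernel user getting
+ * an ion client and querying a heap, using only the declarations above. The
+ * client name, the heap id and the 0-on-success convention assumed for
+ * hisi_ion_get_heap_info() are illustrative only.
+ *
+ *   struct ion_client *client = hisi_ion_client_create("example_drv");
+ *   struct ion_heap_info_data info;
+ *
+ *   if (client && !hisi_ion_get_heap_info(ION_FB_HEAP_ID, &info))
+ *     pr_info("heap %s: size 0x%x\n", info.name, info.heap_size);
+ */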
+#endif
diff --git a/include/linux/hisi/hisi_irq_affinity.h b/include/linux/hisi/hisi_irq_affinity.h
new file mode 100644
index 000000000000..94347f3c3009
--- /dev/null
+++ b/include/linux/hisi/hisi_irq_affinity.h
@@ -0,0 +1,17 @@
+/* hisi_irq_affinity.h */
+
+#ifndef HISI_IRQ_AFFINITY_H
+#define HISI_IRQ_AFFINITY_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_HISI_IRQ_AFFINITY
+extern void hisi_irqaffinity_status(void);
+extern int hisi_irqaffinity_register(unsigned int irq, int cpu);
+extern void hisi_irqaffinity_unregister(unsigned int irq);
+#else
+static inline int hisi_irqaffinity_register(unsigned int irq, int cpu) { return -ENOSYS; }
+static inline void hisi_irqaffinity_unregister(unsigned int irq) { return; }
+static inline void hisi_irqaffinity_status(void) { return; }
+#endif /* CONFIG_HISI_IRQ_AFFINITY */
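+
+/*
+ * Illustrative sketch (not part of the original patch): binding an interrupt
+ * to a CPU with the helpers above. The irq number, the CPU index and the
+ * 0-on-success convention are assumptions for the example only.
+ *
+ *   if (hisi_irqaffinity_register(irq, 1))
+ *     pr_warn("failed to bind irq %u to cpu1\n", irq);
+ *
+ *   hisi_irqaffinity_unregister(irq);
+ */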
+#endif /* HISI_IRQ_AFFINITY_H */
diff --git a/include/linux/hisi/hisi_mailbox.h b/include/linux/hisi/hisi_mailbox.h
new file mode 100644
index 000000000000..6605126046ba
--- /dev/null
+++ b/include/linux/hisi/hisi_mailbox.h
@@ -0,0 +1,240 @@
+/* hisi_mailbox.h */
+
+#ifndef __HISI_MAILBOX_H__
+#define __HISI_MAILBOX_H__
+
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+
+
+#define EMDEVCLEAN 1
+#define EMDEVDIRTY 2
+#define ETIMEOUT 3
+
+#define RPUNCERTAIN 2
+#define RPACCESSIBLE 1
+#define RPUNACCESSIBLE 0
+
+#define MDEV_SYNC_SENDING (1 << 0)
+#define MDEV_ASYNC_ENQUEUE (1 << 1)
+#define MDEV_DEACTIVATED (1 << 2)
+
+/* mailbox's channel size, does portland use the same? */
+#define MBOX_CHAN_DATA_SIZE 8
+
+/**
+ * Allocate a static buffer for the tx_task nodes; the number can be adjusted
+ * for different platforms. 512 allows 4 mdev kfifos to be completely filled
+ * (one mdev kfifo can cache 256 tx_task nodes).
+ */
+#define TX_TASK_DDR_NODE_NUM 512
+#define TX_TASK_DDR_NODE_AVA 0xA5A57777
+#define TX_TASK_DDR_NODE_OPY 0x7777A5A5
+
+#define CONTINUOUS_FAIL_CNT_MAX 50
+#define CONTINUOUS_FAIL_JUDGE (likely(g_ContinuousFailCnt < CONTINUOUS_FAIL_CNT_MAX))
+
+#define MAILBOX_AUTOACK_TIMEOUT msecs_to_jiffies(300)
+#define MAILBOX_MANUACK_TIMEOUT msecs_to_jiffies(300)
+/* IPC_DEFAULT_BOARD_TYPE means hardware_board_type is not UDP&FPGA */
+#define IPC_DEFAULT_BOARD_TYPE 0
+
+#ifdef CONFIG_HISI_MAILBOX_PERFORMANCE_DEBUG
+enum {
+ PERFORMANCE_DEBUG_OFF = 0,
+ PERFORMANCE_DEBUG_ON
+};
+
+enum {
+ IPC_START = 0,
+ IPC_SEND,
+ IPC_RECEIVE,
+ IPC_BH,
+ IPC_COMPLETE,
+ IPC_TTS_MAX
+};
+
+#define MBOX_DEBUG_ON(mbox) (mbox->perf_debug = PERFORMANCE_DEBUG_ON)
+#define MBOX_DEBUG_OFF(mbox) (mbox->perf_debug = PERFORMANCE_DEBUG_OFF)
+#define MBOX_IS_DEBUG_ON(mbox) (mbox->perf_debug == PERFORMANCE_DEBUG_ON ? 1 : 0)
+#else
+#define MBOX_DEBUG_ON(mbox) do {} while (0)
+#define MBOX_DEBUG_OFF(mbox) do {} while (0)
+#define MBOX_IS_DEBUG_ON(mbox) (0)
+#endif
+
+#define IDLE_STATUS (1 << 4)
+#define SOURCE_STATUS (1 << 5)
+#define DESTINATION_STATUS (1 << 6)
+#define ACK_STATUS (1 << 7)
+
+typedef enum {
+ MANUAL_ACK = 0,
+ AUTO_ACK,
+} mbox_ack_type_t;
+
+typedef enum {
+ TX_MAIL = 0,
+ RX_MAIL,
+ MAIL_TYPE_MAX,
+} mbox_mail_type_t;
+
+/*
+ typedef enum {
+ SEND_MSG = 0,
+ MSG_SCHE,
+ DEAL_START,
+ DEAL_END,
+ SEND_ACK,
+ ACK_SCHE,
+ FREE_CHAN,
+//used in portland's share memory
+TRACK_MAX
+}mbox_track_type_t;
+ */
+
+struct hisi_mbox_task;
+typedef u32 mbox_msg_t;
+typedef int mbox_msg_len_t;
+typedef void (*mbox_complete_t)(struct hisi_mbox_task *task);
+typedef int (*mbox_irq_handler_t)(int irq, void *p);
+typedef mbox_msg_t rproc_msg_t;
+typedef mbox_msg_len_t rproc_msg_len_t;
+typedef void (*rproc_complete_t)(rproc_msg_t *ack_buffer,
+ rproc_msg_len_t ack_buffer_len,
+ int error,
+ void *data);
+
+struct hisi_mbox_task {
+ /* use static memory to cache the async tx buffer */
+ mbox_msg_t tx_buffer[MBOX_CHAN_DATA_SIZE];
+ /* allocated by the mailbox core; mailbox users must not free it when a tx task completes */
+ mbox_msg_t *ack_buffer;
+ mbox_msg_len_t tx_buffer_len;
+ mbox_msg_len_t ack_buffer_len;
+ int need_auto_ack;
+ /* for performance */
+#ifdef CONFIG_HISI_MAILBOX_PERFORMANCE_DEBUG
+ int perf_debug;
+ struct timespec tts[IPC_TTS_MAX];
+#endif
+};
+
+struct hisi_mbox_device {
+ const char *name;
+ struct list_head node;
+ int configured;
+ struct device *dev;
+ void *priv;
+ struct hisi_mbox_dev_ops *ops;
+ int cur_task;
+ int cur_irq;
+ volatile unsigned int status;
+ spinlock_t status_lock;
+ mbox_irq_handler_t irq_handler;
+
+ /* tx attributes */
+ spinlock_t fifo_lock;
+ struct kfifo fifo;
+ struct mutex dev_lock;
+ struct completion complete;
+ spinlock_t complete_lock;
+ int completed;
+ struct hisi_mbox_task *tx_task;
+
+ /* rx attributes */
+ mbox_msg_t *rx_buffer;
+ mbox_msg_t *ack_buffer;
+ struct atomic_notifier_head notifier;
+ struct tasklet_struct rx_bh;
+ struct task_struct *tx_kthread;
+
+ wait_queue_head_t tx_wait;
+};
+
+struct hisi_mbox_dev_ops {
+ /* get ready */
+ int (*startup)(struct hisi_mbox_device *mdev);
+ void (*shutdown)(struct hisi_mbox_device *mdev);
+ int (*check)(struct hisi_mbox_device *mdev,
+ mbox_mail_type_t mtype, int mdev_index);
+ /* communication */
+ mbox_msg_len_t (*recv)(struct hisi_mbox_device *mdev, mbox_msg_t **msg);
+ int (*send)(struct hisi_mbox_device *mdev,
+ mbox_msg_t *msg,
+ mbox_msg_len_t len,
+ int ack_mode);
+ void (*ack)(struct hisi_mbox_device *mdev,
+ mbox_msg_t *msg,
+ mbox_msg_len_t len);
+ void (*refresh)(struct hisi_mbox_device *mdev);
+ unsigned int (*get_timeout)(struct hisi_mbox_device *mdev);
+ unsigned int (*get_fifo_size)(struct hisi_mbox_device *mdev);
+ unsigned int (*get_sched_priority)(struct hisi_mbox_device *mdev);
+ unsigned int (*get_sched_policy)(struct hisi_mbox_device *mdev);
+ unsigned int (*read_board_type)(struct hisi_mbox_device *mdev);
+ /* irq */
+ int (*request_irq)(struct hisi_mbox_device *mdev, irq_handler_t handler, void *p);
+ void (*free_irq)(struct hisi_mbox_device *mdev, void *p);
+ void (*enable_irq)(struct hisi_mbox_device *mdev);
+ void (*disable_irq)(struct hisi_mbox_device *mdev);
+ struct hisi_mbox_device *(*irq_to_mdev)(struct hisi_mbox_device *mdev, struct list_head *list, int irq);
+ int (*is_stm)(struct hisi_mbox_device *mdev, unsigned int stm);
+ void (*clr_ack)(struct hisi_mbox_device *mdev);
+ void (*ensure_channel)(struct hisi_mbox_device *mdev);
+
+ /* mntn */
+ void (*status)(struct hisi_mbox_device *mdev);
+};
+
+struct hisi_mbox {
+ int mdev_index;
+ struct hisi_mbox_device *tx;
+ struct hisi_mbox_device *rx;
+ struct notifier_block *nb;
+
+#ifdef CONFIG_HISI_MAILBOX_PERFORMANCE_DEBUG
+ int perf_debug;
+#endif
+};
+
+extern void hisi_mbox_task_free(struct hisi_mbox_task **tx_task);
+extern struct hisi_mbox_task *hisi_mbox_task_alloc(struct hisi_mbox *mbox,
+ mbox_msg_t *tx_buffer,
+ mbox_msg_len_t tx_buffer_len,
+ int need_auto_ack
+ );
+
+/*
+ * May sleep; callers must guarantee it is not called from atomic context.
+ */
+extern int hisi_mbox_msg_send_sync(struct hisi_mbox *mbox,
+ mbox_msg_t *tx_buffer,
+ mbox_msg_len_t tx_buffer_len,
+ int need_auto_ack,
+ mbox_msg_t *ack_buffer,
+ mbox_msg_len_t ack_buffer_len);
+
+/*
+ * May be called from atomic context.
+ */
+extern int hisi_mbox_msg_send_async(struct hisi_mbox *mbox, struct hisi_mbox_task *tx_task);
+
+extern struct hisi_mbox *hisi_mbox_get(int mdev_index, struct notifier_block *nb);
+extern void hisi_mbox_put(struct hisi_mbox **mbox);
+
+extern void hisi_mbox_device_activate(struct hisi_mbox_device **mdevs);
+extern void hisi_mbox_device_deactivate(struct hisi_mbox_device **mdevs);
+
+extern int hisi_mbox_device_register(struct device *parent, struct hisi_mbox_device **mdevs);
+extern int hisi_mbox_device_unregister(struct hisi_mbox_device **list);
+
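+/*
+ * Illustrative sketch (not part of the original patch): a synchronous send
+ * through the API above. The mailbox index, the payload, passing NULL for
+ * the rx notifier and treating a non-zero return as an error are assumptions
+ * for the example only.
+ *
+ *   mbox_msg_t tx[2] = { 0x1, 0x2 };
+ *   mbox_msg_t ack[MBOX_CHAN_DATA_SIZE];
+ *   struct hisi_mbox *mbox = hisi_mbox_get(0, NULL);
+ *
+ *   if (mbox) {
+ *     if (hisi_mbox_msg_send_sync(mbox, tx, 2, AUTO_ACK,
+ *                                 ack, MBOX_CHAN_DATA_SIZE))
+ *       pr_err("mailbox send failed\n");
+ *     hisi_mbox_put(&mbox);
+ *   }
+ */
+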
+#endif /* __HISI_MAILBOX_H__ */
diff --git a/include/linux/hisi/hisi_rproc.h b/include/linux/hisi/hisi_rproc.h
new file mode 100644
index 000000000000..5455a6220041
--- /dev/null
+++ b/include/linux/hisi/hisi_rproc.h
@@ -0,0 +1,96 @@
+/* hisi_rproc.h */
+
+#ifndef __HISI_RPROC_H__
+#define __HISI_RPROC_H__
+
+#include <linux/hisi/hisi_mailbox.h>
+#include <linux/errno.h>
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+ typedef enum {
+ HISI_RPROC_LPM3_MBX0,
+ HISI_RPROC_RDR_MBX1,
+ HISI_RPROC_HIFI_MBX2,
+ HISI_RPROC_DEFAULT_MBX3,
+ HISI_RPROC_IOM3_MBX4,
+ HISI_RPROC_IVP_MBX5,
+ HISI_RPROC_IVP_MBX6,
+ HISI_RPROC_DEFAULT_MBX7,
+ /* MBX8 and MBX9 are not used in austin and dallas */
+ HISI_RPROC_ISP_MBX8,
+ HISI_RPROC_ISP_MBX9,
+ HISI_RPROC_IOM3_MBX10,
+ HISI_RPROC_IOM3_MBX11,
+ HISI_RPROC_IOM3_MBX12,
+ HISI_RPROC_LPM3_MBX13,
+ HISI_RPROC_LPM3_MBX14,
+ HISI_RPROC_LPM3_MBX15,
+ HISI_RPROC_LPM3_MBX16,
+ HISI_RPROC_LPM3_MBX17,
+ HISI_RPROC_HIFI_MBX18,
+ HISI_RPROC_MODEM_A9_MBX19,
+ HISI_RPROC_MODEM_A9_MBX20,
+ HISI_RPROC_MODEM_A9_MBX21,
+ HISI_RPROC_MODEM_BBE16_MBX22,
+ /* MBX23 and MBX24 are not used in austin and dallas */
+ HISI_RPROC_ISP_MBX23,
+ HISI_RPROC_ISP_MBX24,
+ HISI_RPROC_IVP_MBX25,
+ HISI_RPROC_IVP_MBX26,
+ HISI_RPROC_LPM3_MBX27,
+ HISI_RPROC_LPM3_MBX28,
+ HISI_RPROC_HIFI_MBX29,
+ HISI_RPROC_LPM3_MBX30,
+ HISI_RPROC_ISP_MBX0 = 100,
+ HISI_RPROC_ISP_MBX1,
+ HISI_RPROC_ISP_MBX2,
+ HISI_RPROC_ISP_MBX3,
+ HISI_RPROC_MAX = 0xFF
+ } rproc_id_t;
+
+#define RPROC_SYNC_SEND(rproc_id, msg, len, \
+ ack_buffer, ack_buffer_len) \
+ hisi_rproc_xfer_sync(rproc_id, msg, len, \
+ ack_buffer, ack_buffer_len)
+
+#define RPROC_ASYNC_SEND(rproc_id, msg, len) \
+ hisi_rproc_xfer_async(rproc_id, msg, len)
+
+#define RPROC_MONITOR_REGISTER(rproc_id, nb) \
+ hisi_rproc_rx_register(rproc_id, nb)
+
+#define RPROC_MONITOR_UNREGISTER(rproc_id, nb) \
+ hisi_rproc_rx_unregister(rproc_id, nb)
+
+#define RPROC_PUT(rproc_id) hisi_rproc_put(rproc_id)
+#define RPROC_FLUSH_TX(rproc_id) hisi_rproc_flush_tx(rproc_id)
+
+
+ extern int hisi_rproc_xfer_sync(rproc_id_t rproc_id,
+ rproc_msg_t *msg,
+ rproc_msg_len_t len,
+ rproc_msg_t *ack_buffer,
+ rproc_msg_len_t ack_buffer_len);
+ extern int hisi_rproc_xfer_async(rproc_id_t rproc_id,
+ rproc_msg_t *msg,
+ rproc_msg_len_t len
+ );
+ extern int
+ hisi_rproc_rx_register(rproc_id_t rproc_id, struct notifier_block *nb);
+ extern int
+ hisi_rproc_rx_unregister(rproc_id_t rproc_id, struct notifier_block *nb);
+ extern int hisi_rproc_put(rproc_id_t rproc_id);
+ extern int hisi_rproc_flush_tx(rproc_id_t rproc_id);
+
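+/*
+ * Illustrative sketch (not part of the original patch): sending a message to
+ * an LPM3 mailbox through the wrappers above. The mailbox choice, the payload
+ * and treating a non-zero return as an error are assumptions for the example.
+ *
+ *   rproc_msg_t msg[2] = { 0x1, 0x2 };
+ *   rproc_msg_t ack[2] = { 0, 0 };
+ *
+ *   if (RPROC_SYNC_SEND(HISI_RPROC_LPM3_MBX13, msg, 2, ack, 2))
+ *     pr_err("rproc sync send failed\n");
+ */
+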
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+
+#endif /* __HISI_RPROC_H__ */
diff --git a/include/linux/hisi/ion-iommu.h b/include/linux/hisi/ion-iommu.h
new file mode 100644
index 000000000000..6e73ae3ff494
--- /dev/null
+++ b/include/linux/hisi/ion-iommu.h
@@ -0,0 +1,79 @@
+#ifndef _HISI_IOMMU_H_
+#define _HISI_IOMMU_H_
+
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_HISI_IODOMAIN_API
+
+struct section_info {
+ unsigned int iova_start;
+ unsigned int iova_size;
+ unsigned int page_size;
+ unsigned int align;
+};
+struct hisi_iommu_domain {
+ struct iommu_domain *domain;
+ struct gen_pool *iova_pool;
+ struct section_info range;
+};
+
+/**
+ * hisi iommu domain interface
+ */
+struct hisi_iommu_domain *hisi_get_domain(void);
+size_t hisi_iommu_iova_size(void);
+size_t hisi_iommu_iova_available(void);
+void hisi_iommu_free_iova(unsigned long iova, size_t size);
+unsigned long hisi_iommu_alloc_iova(size_t size, unsigned long align);
+int hisi_iommu_map_range(struct iommu_domain *domain, unsigned long iova_start,
+ struct scatterlist *sgl, unsigned long iova_size, unsigned int prot);
+int hisi_iommu_unmap_range(struct iommu_domain *domain, unsigned long iova_start,
+ unsigned long iova_size);
+int hisi_iommu_map_domain(struct scatterlist *sg, struct iommu_map_format *format);
+int hisi_iommu_unmap_domain(struct iommu_map_format *format);
+
+phys_addr_t hisi_iommu_domain_iova_to_phys(unsigned long iova);
+
+unsigned int hisi_iommu_page_size(void);
+bool hisi_iommu_off_on(void);
+int hisi_iommu_get_info(unsigned int *iova_start, unsigned int *pgtbl_base);
+
+#else
+
+/**
+ * hisi iommu domain interface
+ */
+static inline int hisi_iommu_map_domain(struct scatterlist *sg,
+ struct iommu_map_format *format)
+{
+ return 0;
+}
+
+static inline int hisi_iommu_unmap_domain(struct iommu_map_format *format)
+{
+ return 0;
+}
+
+static inline phys_addr_t hisi_iommu_domain_iova_to_phys(unsigned long iova)
+{
+ return 0;
+}
+static inline unsigned int hisi_iommu_page_size(void)
+{
+ return SZ_4K;
+}
+
+static inline bool hisi_iommu_off_on(void)
+{
+ return false;
+}
+
+static inline int hisi_iommu_get_info(unsigned int *iova_start, unsigned int *pgtbl_base)
+{
+ return 0;
+}
+
+#endif /* CONFIG_HISI_IODOMAIN_API */
+
+#endif /* _HISI_IOMMU_H_ */
diff --git a/include/linux/hisi/ipc_msg.h b/include/linux/hisi/ipc_msg.h
new file mode 100644
index 000000000000..c591937a7f27
--- /dev/null
+++ b/include/linux/hisi/ipc_msg.h
@@ -0,0 +1,105 @@
+#pragma GCC diagnostic push
+
+#ifndef __IPC_MSG_H__
+#define __IPC_MSG_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif
+
+ /*********************SEND MODE DEFINE*********************/
+#define SYNC_MODE 1
+#define ASYNC_MODE 0
+
+ /***********************CMD DEFINE***********************/
+#define CMD_ON 0
+#define CMD_OFF 1
+#define CMD_INQUIRY 2
+#define CMD_SETTING 3
+#define CMD_NOTIFY 4
+#define CMD_TEST 5
+#define MAX_CMD 6
+
+#define OBJ_AP 0
+#define OBJ_LITTLE_CLUSTER 1
+#define OBJ_BIG_CLUSTER 2
+#define OBJ_GPU 3
+#define OBJ_DDR 4
+#define OBJ_ASP 5
+#define OBJ_HIFI 6
+#define OBJ_IOM3 7
+#define OBJ_LPM3 8
+#define OBJ_MODEM 9
+#define OBJ_SYS 10
+#define OBJ_HKADC 11
+#define OBJ_REGULATOR 12
+#define OBJ_CLK 13
+#define OBJ_TEMPERTURE 14
+#define OBJ_COUL 15
+#define OBJ_PSCI 16
+#define OBJ_TELEMNTN 17
+#define OBJ_MCA 18
+#define OBJ_INSE 19
+#define OBJ_TEST 20
+#define MAX_CMD_OBJ 21
+
+#define TYPE_POWER 1
+#define TYPE_CLK 2
+#define TYPE_CORE 3
+#define TYPE_CLUSTER 4
+#define TYPE_SLEEP 5
+#define TYPE_SR 6
+#define TYPE_MODE 7
+#define TYPE_UPLIMIT 8
+#define TYPE_DNLIMIT 9
+#define TYPE_FREQ 10
+#define TYPE_T 11
+#define TYPE_VOLT 12
+#define TYPE_RESET 13
+#define TYPE_PWC 14
+#define TYPE_TEST 15
+
+ /* mail size */
+#define MAX_MAIL_SIZE 8
+#define IPC_CMD(src, obj, cmd, type) (((src) << 24) | ((obj) << 16) | ((cmd) << 8) | (type))
+
+ struct cmd_parse {
+ unsigned char cmd_type;
+ unsigned char cmd;
+ unsigned char cmd_obj;
+ unsigned char cmd_src;
+ unsigned char cmd_para[4];
+ };
+
+ struct ipc_msg {
+ union {
+ unsigned int data[MAX_MAIL_SIZE];
+ struct cmd_parse cmd_mix;
+ };
+ unsigned char ipc_id;
+ unsigned char mailbox_id;
+ unsigned char dest_id;
+ unsigned char wait_id;
+ unsigned char mode;
+ unsigned char end_id;
+ };
+
+ union ipc_data {
+ unsigned int data[MAX_MAIL_SIZE];
+ struct cmd_parse cmd_mix;
+ };
+
+ extern int ipc_msg_send(unsigned int obj, struct ipc_msg *msg, unsigned int mode);
+ extern int ipc_msg_req_callback(unsigned int obj, unsigned int cmd, int (*func)(union ipc_data *));
+ extern int ipc_msg_put_callback(unsigned int obj, unsigned int cmd);
+
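+/*
+ * Illustrative sketch (not part of the original patch): building a command
+ * word with IPC_CMD() and sending it synchronously. The particular
+ * source/object/command/type combination and treating a non-zero return as
+ * an error are assumptions for the example only.
+ *
+ *   struct ipc_msg msg = { .data = { 0 } };
+ *
+ *   msg.data[0] = IPC_CMD(OBJ_AP, OBJ_LPM3, CMD_SETTING, TYPE_FREQ);
+ *   if (ipc_msg_send(OBJ_LPM3, &msg, SYNC_MODE))
+ *     pr_err("ipc send failed\n");
+ */
+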
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif
+
+#endif/*__IPC_MSG_H__*/
+#pragma GCC diagnostic pop
diff --git a/include/linux/hisi/log/hisi_log.h b/include/linux/hisi/log/hisi_log.h
new file mode 100644
index 000000000000..7a9b28194ade
--- /dev/null
+++ b/include/linux/hisi/log/hisi_log.h
@@ -0,0 +1,143 @@
+#ifndef _LINUX_HISILOG_H
+#define _LINUX_HISILOG_H
+
+#include <linux/printk.h>
+#include <linux/types.h>
+
+enum {
+ HISILOG_ERR = 1U << 0,
+ HISILOG_WARNING = 1U << 1,
+ HISILOG_INFO = 1U << 2,
+ HISILOG_DEBUG = 1U << 3,
+ HISILOG_DEBUG1 = 1U << 4,
+ HISILOG_DEBUG2 = 1U << 5,
+ HISILOG_DEBUG3 = 1U << 6,
+ HISILOG_DEBUG4 = 1U << 7,
+};
+
+#define HISILOG_TAG_DEFOUTL_LEVEL (HISILOG_ERR \
+ | HISILOG_WARNING \
+ | HISILOG_INFO)
+
+struct hisi_log_tag {
+ const char *name;
+ u32 level;
+};
+
+#define HISILOG_REGIST() \
+ HISILOG_REGIST_TAG_LEVEL(HISILOG_TAG, HISILOG_TAG_DEFOUTL_LEVEL)
+
+#define HISILOG_REGIST_LEVEL(level) \
+ HISILOG_REGIST_TAG_LEVEL(HISILOG_TAG, level)
+
+#define HISILOG_REGIST_TAG_LEVEL(name, level) \
+ _HISILOG_REGIST_TAG_LEVEL(name, level)
+
+#define _HISILOG_REGIST_TAG_LEVEL(name, level) \
+ static struct hisi_log_tag TAG_STRUCT_NAME(name) \
+__used \
+__attribute__ ((unused, __section__("__hisilog_tag"))) \
+= { #name, level}
+
+#define hisilog_err(x...) \
+ _hisilog_err(HISILOG_TAG, ##x)
+
+#define _hisilog_err(TAG, x...) \
+ __hisilog_err(TAG, ##x)
+
+#define __hisilog_err(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_ERR) \
+ pr_err(hw_fmt_tag(TAG, E) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_warn(x...) \
+ _hisilog_warn(HISILOG_TAG, ##x)
+
+#define _hisilog_warn(TAG, x...) \
+ __hisilog_warn(TAG, ##x)
+
+#define __hisilog_warn(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_WARNING) \
+ pr_err(hw_fmt_tag(TAG, W) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_info(x...) \
+ _hisilog_info(HISILOG_TAG, ##x)
+
+#define _hisilog_info(TAG, x...) \
+ __hisilog_info(TAG, ##x)
+
+#define __hisilog_info(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_INFO) \
+ pr_info(hw_fmt_tag(TAG, I) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_debug(x...) \
+ _hisilog_debug(HISILOG_TAG, ##x)
+
+#define _hisilog_debug(TAG, x...) \
+ __hisilog_debug(TAG, ##x)
+
+#define __hisilog_debug(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_DEBUG) \
+ pr_err(hw_fmt_tag(TAG, D) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_debug1(x...) \
+ _hisilog_debug1(HISILOG_TAG, ##x)
+
+#define _hisilog_debug1(TAG, x...) \
+ __hisilog_debug1(TAG, ##x)
+
+#define __hisilog_debug1(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_DEBUG1) \
+ pr_err(hw_fmt_tag(TAG, D1) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_debug2(x...) \
+ _hisilog_debug2(HISILOG_TAG, ##x)
+
+#define _hisilog_debug2(TAG, x...) \
+ __hisilog_debug2(TAG, ##x)
+
+#define __hisilog_debug2(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_DEBUG2) \
+ pr_err(hw_fmt_tag(TAG, D2) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_debug3(x...) \
+ _hisilog_debug3(HISILOG_TAG, ##x)
+
+#define _hisilog_debug3(TAG, x...) \
+ __hisilog_debug3(TAG, ##x)
+
+#define __hisilog_debug3(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_DEBUG3) \
+ pr_err(hw_fmt_tag(TAG, D3) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define hisilog_debug4(x...) \
+ _hisilog_debug4(HISILOG_TAG, ##x)
+
+#define _hisilog_debug4(TAG, x...) \
+ __hisilog_debug4(TAG, ##x)
+
+#define __hisilog_debug4(TAG, fmt, ...) \
+ do { \
+ if (TAG_STRUCT_NAME(TAG).level & HISILOG_DEBUG4) \
+ pr_err(hw_fmt_tag(TAG, D4) fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define TAG_STRUCT_NAME(name) \
+ _hwtag_##name
+
+#define hw_fmt_tag(TAG, LEVEL) "[" #LEVEL "/" #TAG "] "
+
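+/*
+ * Illustrative sketch (not part of the original patch): a driver defines
+ * HISILOG_TAG and registers it once at file scope, then uses the level-gated
+ * printers. The tag name and the message contents are assumptions only.
+ *
+ *   #define HISILOG_TAG example_drv
+ *   HISILOG_REGIST();
+ *
+ *   hisilog_info("probe ok\n");
+ *   hisilog_err("probe failed: %d\n", ret);
+ */
+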
+#endif
diff --git a/include/linux/hisi/rdr_pub.h b/include/linux/hisi/rdr_pub.h
new file mode 100644
index 000000000000..584070cf980a
--- /dev/null
+++ b/include/linux/hisi/rdr_pub.h
@@ -0,0 +1,303 @@
+/*
+ * blackbox header file (blackbox: kernel run data recorder.)
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BB_PUB_H__
+#define __BB_PUB_H__
+
+#include <linux/module.h>
+#include <linux/hisi/rdr_types.h>
+#include "mntn_public_interface.h"
+
+#define STR_MODULENAME_MAXLEN 16
+#define STR_EXCEPTIONDESC_MAXLEN 48
+#define STR_TASKNAME_MAXLEN 16
+#define STR_USERDATA_MAXLEN 64
+
+#define PATH_ROOT "/data/hisi_logs/"
+#define RDR_REBOOT_TIMES_FILE "/data/hisi_logs/reboot_times.log"
+#define RDR_ERECOVERY_REASON_FILE "/cache/recovery/last_erecovery_entry"
+#define RDR_UNEXPECTED_REBOOT_MARK_ADDR 0x2846579
+
+#define INT_IN_FLAG 0xAAAAUL
+#define INT_EXIT_FLAG 0xBBBBUL
+
+#define BBOX_SAVE_DONE_FILENAME "/DONE"
+
+#define FILE_EDITION "/proc/log-usertype"
+#define OVERSEA_USER 5
+#define BETA_USER 3
+#define COMMERCIAL_USER 1
+
+#define START_CHAR_0 '0'
+#define END_CHAR_9 '9'
+
+enum EDITION_KIND {
+ EDITION_USER = 1,
+ EDITION_INTERNAL_BETA = 2,
+ EDITION_OVERSEA_BETA = 3,
+ EDITION_MAX
+};
+
+enum SAVE_STEP {
+ BBOX_SAVE_STEP1 = 0x1,
+ BBOX_SAVE_STEP2 = 0x2,
+ BBOX_SAVE_STEP3 = 0x3,
+ BBOX_SAVE_STEP_DONE = 0x100
+};
+
+/* this is for test */
+enum rdr_except_reason_e {
+ RDR_EXCE_WD = 0x01, /* watchdog timeout */
+ RDR_EXCE_INITIATIVE, /* initiative call of sys_error */
+ RDR_EXCE_PANIC, /* ARM exception (e.g. data abort) */
+ RDR_EXCE_STACKOVERFLOW,
+ RDR_EXCE_DIE,
+ RDR_EXCE_UNDEF,
+ RDR_EXCE_MAX
+};
+
+enum PROCESS_PRI {
+ RDR_ERR = 0x01,
+ RDR_WARN,
+ RDR_OTHER,
+ RDR_PPRI_MAX
+};
+
+enum REBOOT_PRI {
+ RDR_REBOOT_NOW = 0x01,
+ RDR_REBOOT_WAIT,
+ RDR_REBOOT_NO,
+ RDR_REBOOT_MAX
+};
+
+enum REENTRANT {
+ RDR_REENTRANT_ALLOW = 0xff00da00,
+ RDR_REENTRANT_DISALLOW
+};
+
+enum UPLOAD_FLAG {
+ RDR_UPLOAD_YES = 0xff00fa00,
+ RDR_UPLOAD_NO
+};
+
+
+enum RDR_RETURN {
+ RDR_SUCCESSED = 0x9f000000,
+ RDR_FAILD = 0x9f000001,
+ RDR_NULLPOINTER = 0x9f0000ff
+};
+
+typedef void (*rdr_e_callback)(u32, void*);
+
+/*
+ * struct list_head e_list;
+ * u32 modid, exception id;
+ *    if modid equals 0, a modid is generated automatically and returned.
+ * u32 modid_end, a modid region [modid~modid_end] can be registered;
+ *    modid_end must be >= modid.
+ *    if modid_end equals 0, only modid is registered,
+ *    but modid and modid_end cannot both be 0 at the same time.
+ * u32 process_priority, exception process priority
+ * u32 reboot_priority, exception reboot priority
+ * u64 save_log_mask, mask of logs that need to be saved
+ * u64 notify_core_mask, mask of other cores that need to be notified
+ * u64 reset_core_mask, mask of other cores that need to be reset
+ * u64 from_core, the core on which the exception happened
+ * u32 reentrant, whether exception reentrancy is allowed
+ * u32 exce_type, the type of the exception
+ * char* from_module, the module in which the exception happened
+ * char* desc, the description of the exception
+ * rdr_e_callback callback, called after the exception has been processed.
+ * u32 reserve_u32; reserved u32
+ * void* reserve_p reserved void *
+ */
+struct rdr_exception_info_s {
+ struct list_head e_list;
+ u32 e_modid;
+ u32 e_modid_end;
+ u32 e_process_priority;
+ u32 e_reboot_priority;
+ u64 e_notify_core_mask;
+ u64 e_reset_core_mask;
+ u64 e_from_core;
+ u32 e_reentrant;
+ u32 e_exce_type;
+ u32 e_upload_flag;
+ u8 e_from_module[MODULE_NAME_LEN];
+ u8 e_desc[STR_EXCEPTIONDESC_MAXLEN];
+ u32 e_reserve_u32;
+ void *e_reserve_p;
+ rdr_e_callback e_callback;
+};
+
+/*
+ * func name: pfn_cb_dump_done
+ * func args:
+ * u32 modid
+ * exception id
+ * u64 coreid
+ * the core that has finished dumping
+ * return value: none
+ */
+typedef void (*pfn_cb_dump_done)(u32 modid, u64 coreid);
+
+/*
+ * func name: pfn_dump
+ * func args:
+ * u32 modid
+ * exception id
+ * u64 coreid
+ * exception core
+ * u32 etype
+ * exception type
+ * char* logpath
+ * exception log path
+ * pfn_cb_dump_done fndone
+ * callback to invoke when the dump is done
+ * return value: none
+ */
+typedef void (*pfn_dump)(u32 modid, u32 etype, u64 coreid,
+ char *logpath, pfn_cb_dump_done fndone);
+/*
+ * func name: pfn_reset
+ * func args:
+ * u32 modid
+ * exception id
+ * u64 coreid
+ * exception core
+ * u32 e_type
+ * exception type
+ * return value: none
+ */
+typedef void (*pfn_reset)(u32 modid, u32 etype, u64 coreid);
+
+struct rdr_module_ops_pub {
+ pfn_dump ops_dump;
+ pfn_reset ops_reset;
+};
+
+struct rdr_register_module_result {
+ u64 log_addr;
+ u32 log_len;
+ RDR_NVE nve;
+};
+
+#ifdef CONFIG_HISI_BB
+/*
+ * func name: rdr_register_exception
+ * func args:
+ * struct rdr_exception_info_s *e
+ * return value e_modid
+ * == 0 error
+ * != 0 success
+ */
+u32 rdr_register_exception(struct rdr_exception_info_s *e);
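+
+/*
+ * Illustrative registration sketch (MODID_EXAMPLE and the field values are
+ * placeholders, not defined by this header):
+ *
+ *   struct rdr_exception_info_s einfo = {
+ *           .e_modid            = MODID_EXAMPLE,
+ *           .e_modid_end        = MODID_EXAMPLE,
+ *           .e_process_priority = RDR_ERR,
+ *           .e_reboot_priority  = RDR_REBOOT_NO,
+ *           .e_reentrant        = RDR_REENTRANT_DISALLOW,
+ *           .e_exce_type        = RDR_EXCE_WD,
+ *           .e_upload_flag      = RDR_UPLOAD_YES,
+ *           .e_from_module      = "example",
+ *           .e_desc             = "example watchdog exception",
+ *   };
+ *
+ *   if (!rdr_register_exception(&einfo))
+ *           pr_err("rdr exception registration failed\n");
+ */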
+
+/*
+ * func name: bb_unregister_exception_type
+ * func args:
+ * u32 modid, exception id;
+ * return
+ * < 0 fail
+ * >=0 success
+ * u32 bb_unregister_exception(u32 modid);
+ */
+
+/*
+ * func name: hisi_bbox_map
+ * func args:
+ * @paddr: physical address in black box
+ * @size: size of memory
+ * return:
+ * success: virtual address
+ * fail: NULL or -ENOMEM
+ */
+void *hisi_bbox_map(phys_addr_t paddr, size_t size);
+
+/*
+ * func name: hisi_bbox_unmap
+ * func args:
+ * @vaddr: virtual address returned by hisi_bbox_map
+ */
+void hisi_bbox_unmap(const void *vaddr);
+
+/*
+ * func name: rdr_register_module_ops
+ * func args:
+ * u64 coreid, core id;
+ * struct rdr_module_ops_pub* ops;
+ * struct rdr_register_module_result* retinfo
+ * return value
+ * < 0 error
+ * >=0 success
+ */
+int rdr_register_module_ops(
+ u64 coreid,
+ struct rdr_module_ops_pub *ops,
+ struct rdr_register_module_result *retinfo
+ );
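+
+/*
+ * Illustrative dump/reset registration sketch (the callbacks and
+ * EXAMPLE_COREID are placeholders):
+ *
+ *   static void example_dump(u32 modid, u32 etype, u64 coreid,
+ *                            char *logpath, pfn_cb_dump_done fndone)
+ *   {
+ *           ...save module logs under logpath...
+ *           if (fndone)
+ *                   fndone(modid, coreid);
+ *   }
+ *
+ *   static void example_reset(u32 modid, u32 etype, u64 coreid) {}
+ *
+ *   static struct rdr_module_ops_pub example_ops = {
+ *           .ops_dump  = example_dump,
+ *           .ops_reset = example_reset,
+ *   };
+ *   static struct rdr_register_module_result retinfo;
+ *
+ *   if (rdr_register_module_ops(EXAMPLE_COREID, &example_ops, &retinfo) >= 0)
+ *           ...retinfo.log_addr / retinfo.log_len describe the log area...
+ */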
+
+/*
+ * func name: rdr_unregister_module_ops_info
+ * func args:
+ * u64 coreid, core id;
+ * return
+ * < 0 fail
+ * >=0 success
+ u64 rdr_unregister_module_ops_info(u64 coreid);
+ */
+
+/*
+ * func name: rdr_system_error
+ * func args:
+ * u32 modid, modid (must be registered);
+ * u32 arg1, arg1;
+ * u32 arg2, arg2;
+ * return void
+ */
+void rdr_system_error(u32 modid, u32 arg1, u32 arg2);
+
+void rdr_syserr_process_for_ap(u32 modid, u64 arg1, u64 arg2);
+
+unsigned int bbox_check_edition(void);
+int rdr_wait_partition(char *path, int timeouts);
+void rdr_set_wdt_kick_slice(void);
+u64 rdr_get_last_wdt_kick_slice(void);
+u64 get_32k_abs_timer_value(void);
+void save_log_to_dfx_tempbuffer(u32 reboot_type);
+void clear_dfx_tempbuffer(void);
+void systemerror_save_log2dfx(u32 reboot_type);
+#else
+static inline void *hisi_bbox_map(phys_addr_t paddr, size_t size) { return NULL; }
+static inline void hisi_bbox_unmap(const void *vaddr) {}
+static inline u32 rdr_register_exception(struct rdr_exception_info_s *e) { return 0; }
+static inline int rdr_register_module_ops(
+ u64 coreid,
+ struct rdr_module_ops_pub *ops,
+ struct rdr_register_module_result *retinfo
+ ) { return -1; }
+static inline void rdr_system_error(u32 modid, u32 arg1, u32 arg2) {}
+static inline void rdr_syserr_process_for_ap(u32 modid, u64 arg1, u64 arg2) {}
+
+static inline unsigned int bbox_check_edition(void) { return EDITION_USER; }
+static inline int rdr_wait_partition(char *path, int timeouts) { return 0; }
+static inline void rdr_set_wdt_kick_slice(void) {}
+static inline u64 rdr_get_last_wdt_kick_slice(void) { return 0; }
+static inline u64 get_32k_abs_timer_value(void) { return 0; }
+static inline void save_log_to_dfx_tempbuffer(u32 reboot_type) {}
+static inline void clear_dfx_tempbuffer(void) {}
+static inline void systemerror_save_log2dfx(u32 reboot_type) {}
+#endif
+
+void get_exception_info(unsigned long *buf, unsigned long *buf_len);
+#define RDR_REBOOTDUMPINFO_FLAG 0xdd140607
+
+#endif /* __BB_PUB_H__ */
+
diff --git a/include/linux/hisi/rdr_types.h b/include/linux/hisi/rdr_types.h
new file mode 100644
index 000000000000..3e64c704e5b8
--- /dev/null
+++ b/include/linux/hisi/rdr_types.h
@@ -0,0 +1,21 @@
+/*
+ * blackbox header file (blackbox: kernel runtime data recorder)
+ *
+ * Copyright (c) 2013 Hisilicon Technologies CO., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RDR_TYPES_H__
+#define __RDR_TYPES_H__
+
+#include <linux/types.h>
+
+#define RDR_INT (long long)
+#define RDR_PTR (void *)
+#define RDR_NVE u64
+
+#endif /* __RDR_TYPES_H__ */
+
diff --git a/include/linux/hisi/usb/hisi_pd_dev.h b/include/linux/hisi/usb/hisi_pd_dev.h
new file mode 100644
index 000000000000..68b6fc9356d9
--- /dev/null
+++ b/include/linux/hisi/usb/hisi_pd_dev.h
@@ -0,0 +1,193 @@
+/**********************************************************
+ * Filename: hisi_pd_dev.h
+ *
+ * Description: Hisilicon type-c device public header file for the
+ * type-c core driver and chip drivers
+ *
+ * Copyright: (C) 2014 Hisilicon.
+ *
+ * Author: Hisilicon
+ *
+ **********************************************************/
+
+#ifndef __HISI_PD_DEV_H__
+#define __HISI_PD_DEV_H__
+
+#include <linux/device.h>
+#include <linux/hisi/usb/hisi_usb.h>
+#include <linux/hisi/log/hisi_log.h>
+
+#define CONFIG_DPM_USB_PD_CUSTOM_DBGACC
+#define CONFIG_DPM_TYPEC_CAP_DBGACC_SNK
+#define CONFIG_DPM_TYPEC_CAP_CUSTOM_SRC
+
+/* type-c inserted plug orientation */
+enum pd_cc_orient {
+ PD_CC_ORIENT_DEFAULT = 0,
+ PD_CC_ORIENT_CC1,
+ PD_CC_ORIENT_CC2,
+ PD_CC_NOT_READY,
+};
+
+enum pd_connect_result {
+ PD_CONNECT_NONE = 0,
+ PD_CONNECT_TYPEC_ONLY,
+ PD_CONNECT_TYPEC_ONLY_SNK_DFT,
+ PD_CONNECT_TYPEC_ONLY_SNK,
+ PD_CONNECT_TYPEC_ONLY_SRC,
+ PD_CONNECT_PE_READY,
+ PD_CONNECT_PE_READY_SNK,
+ PD_CONNECT_PE_READY_SRC,
+
+#ifdef CONFIG_DPM_USB_PD_CUSTOM_DBGACC
+ PD_CONNECT_PE_READY_DBGACC_UFP,
+ PD_CONNECT_PE_READY_DBGACC_DFP,
+#endif /* CONFIG_DPM_USB_PD_CUSTOM_DBGACC */
+};
+
+enum pd_device_port_power_mode {
+ PD_DEV_PORT_POWERMODE_SOURCE = 0,
+ PD_DEV_PORT_POWERMODE_SINK,
+ PD_DEV_PORT_POWERMODE_NOT_READY,
+};
+
+enum pd_device_port_data_mode {
+ PD_DEV_PORT_DATAMODE_HOST = 0,
+ PD_DEV_PORT_DATAMODE_DEVICE,
+ PD_DEV_PORT_DATAMODE_NOT_READY,
+};
+
+enum pd_device_port_mode {
+ PD_DEV_PORT_MODE_DFP = 0,
+ PD_DEV_PORT_MODE_UFP,
+ PD_DEV_PORT_MODE_NOT_READY,
+};
+
+enum {
+ PD_DPM_PE_EVT_DIS_VBUS_CTRL,
+ PD_DPM_PE_EVT_SOURCE_VCONN,
+ PD_DPM_PE_EVT_SOURCE_VBUS,
+ PD_DPM_PE_EVT_SINK_VBUS,
+ PD_DPM_PE_EVT_PR_SWAP,
+ PD_DPM_PE_EVT_DR_SWAP,
+ PD_DPM_PE_EVT_VCONN_SWAP,
+ PD_DPM_PE_EVT_TYPEC_STATE,
+ PD_DPM_PE_EVT_PD_STATE,
+ PD_DPM_PE_EVT_BC12,
+};
+
+enum pd_typec_attach_type {
+ PD_DPM_TYPEC_UNATTACHED = 0,
+ PD_DPM_TYPEC_ATTACHED_SNK,
+ PD_DPM_TYPEC_ATTACHED_SRC,
+ PD_DPM_TYPEC_ATTACHED_AUDIO,
+ PD_DPM_TYPEC_ATTACHED_DEBUG,
+
+#ifdef CONFIG_DPM_TYPEC_CAP_DBGACC_SNK
+ PD_DPM_TYPEC_ATTACHED_DBGACC_SNK, /* Rp, Rp */
+#endif /* CONFIG_DPM_TYPEC_CAP_DBGACC_SNK */
+
+#ifdef CONFIG_DPM_TYPEC_CAP_CUSTOM_SRC
+ PD_DPM_TYPEC_ATTACHED_CUSTOM_SRC, /* Same Rp */
+#endif /* CONFIG_DPM_TYPEC_CAP_CUSTOM_SRC */
+};
+
+enum pd_dpm_charger_event_type {
+ PD_EVENT_CHARGER_TYPE_USB = 0, /*SDP*/
+ PD_EVENT_CHARGER_TYPE_BC_USB, /*CDP*/
+ PD_EVENT_CHARGER_TYPE_NON_STANDARD, /*UNKNOWN*/
+ PD_EVENT_CHARGER_TYPE_STANDARD, /*DCP*/
+ PD_EVENT_CHARGER_TYPE_FCP, /*FCP*/
+};
+
+enum {
+ PD_DPM_USB_TYPEC_NONE = 0,
+ PD_DPM_USB_TYPEC_DETACHED,
+ PD_DPM_USB_TYPEC_DEVICE_ATTACHED,
+ PD_DPM_USB_TYPEC_HOST_ATTACHED,
+};
+
+enum pd_dpm_uevent_type {
+ PD_DPM_UEVENT_START = 0,
+ PD_DPM_UEVENT_COMPLETE,
+};
+
+enum pd_dpm_wake_lock_type {
+ PD_WAKE_LOCK = 100,
+ PD_WAKE_UNLOCK,
+};
+
+struct pd_dpm_typec_state {
+ bool polarity;
+ int cc1_status;
+ int cc2_status;
+ int old_state;
+ int new_state;
+};
+
+struct pd_dpm_pd_state {
+ u8 connected;
+};
+
+struct pd_dpm_swap_state {
+ u8 new_role;
+};
+
+enum pd_dpm_vbus_type {
+ PD_DPM_VBUS_TYPE_TYPEC = 20,
+ PD_DPM_VBUS_TYPE_PD,
+};
+
+struct pd_dpm_vbus_state {
+ int mv;
+ int ma;
+ u8 vbus_type;
+};
+
+struct pd_dpm_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct mutex pd_lock;
+ struct mutex sink_vbus_lock;
+
+ struct dual_role_phy_instance *dual_role;
+ struct dual_role_phy_desc *desc;
+
+ enum hisi_charger_type charger_type;
+ struct notifier_block usb_nb;
+ struct atomic_notifier_head pd_evt_nh;
+ struct atomic_notifier_head pd_wake_unlock_evt_nh;
+
+ enum pd_dpm_uevent_type uevent_type;
+ struct work_struct pd_work;
+
+ const char *tcpc_name;
+
+ /* usb state update */
+ struct mutex usb_lock;
+ int pending_usb_event;
+ int last_usb_event;
+ struct workqueue_struct *usb_wq;
+ struct delayed_work usb_state_update_work;
+
+ bool bc12_finish_flag;
+ bool pd_finish_flag;
+ bool pd_source_vbus;
+
+ struct pd_dpm_vbus_state bc12_sink_vbus_state;
+};
+
+/* for chip layer to get class created by core layer */
+struct class *hisi_pd_get_class(void);
+
+struct tcpc_device *tcpc_dev_get_by_name(const char *name);
+
+int register_pd_dpm_notifier(struct notifier_block *nb);
+int unregister_pd_dpm_notifier(struct notifier_block *nb);
+int register_pd_wake_unlock_notifier(struct notifier_block *nb);
+int unregister_pd_wake_unlock_notifier(struct notifier_block *nb);
+int pd_dpm_handle_pe_event(unsigned long event, void *data);
+bool pd_dpm_get_pd_finish_flag(void);
+bool pd_dpm_get_pd_source_vbus(void);
+void pd_dpm_get_typec_state(int *typec_detach);
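+
+/*
+ * Illustrative listener sketch (the callback and its name are placeholders;
+ * which PD_DPM_PE_EVT_* events are delivered on this chain is decided by
+ * the core driver, not by this header):
+ *
+ *   static int example_pd_dpm_cb(struct notifier_block *nb,
+ *                                unsigned long event, void *data)
+ *   {
+ *           if (event == PD_DPM_PE_EVT_TYPEC_STATE) {
+ *                   struct pd_dpm_typec_state *tc = data;
+ *
+ *                   pr_info("typec %d -> %d\n",
+ *                           tc->old_state, tc->new_state);
+ *           }
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block example_nb = {
+ *           .notifier_call = example_pd_dpm_cb,
+ *   };
+ *
+ *   register_pd_dpm_notifier(&example_nb);
+ */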
+#endif
diff --git a/include/linux/hisi/usb/hisi_usb.h b/include/linux/hisi/usb/hisi_usb.h
new file mode 100644
index 000000000000..9ee216e32cd1
--- /dev/null
+++ b/include/linux/hisi/usb/hisi_usb.h
@@ -0,0 +1,57 @@
+#ifndef _HISI_USB_H_
+#define _HISI_USB_H_
+
+enum hisi_charger_type {
+ CHARGER_TYPE_SDP = 0, /* Standard Downstream Port */
+ CHARGER_TYPE_CDP, /* Charging Downstream Port */
+ CHARGER_TYPE_DCP, /* Dedicated Charging Port */
+ CHARGER_TYPE_UNKNOWN, /* non-standard */
+ CHARGER_TYPE_NONE, /* not connected */
+
+ /* other messages */
+ PLEASE_PROVIDE_POWER, /* host mode, provide power */
+};
+
+enum otg_dev_event_type {
+ CHARGER_CONNECT_EVENT = 0,
+ CHARGER_DISCONNECT_EVENT,
+ ID_FALL_EVENT,
+ ID_RISE_EVENT,
+ NONE_EVENT
+};
+
+#if defined(CONFIG_USB_SUSB_HDRC) || defined(CONFIG_USB_DWC3)
+int hisi_charger_type_notifier_register(struct notifier_block *nb);
+int hisi_charger_type_notifier_unregister(struct notifier_block *nb);
+enum hisi_charger_type hisi_get_charger_type(void);
+int hisi_usb_otg_event(enum otg_dev_event_type event_type);
+void hisi_usb_otg_bc_again(void);
+#else
+static inline int hisi_charger_type_notifier_register(
+ struct notifier_block *nb) { return 0; }
+static inline int hisi_charger_type_notifier_unregister(
+ struct notifier_block *nb) { return 0; }
+static inline enum hisi_charger_type hisi_get_charger_type(void)
+{
+ return CHARGER_TYPE_NONE;
+}
+
+static inline int hisi_usb_otg_event(enum otg_dev_event_type event_type)
+{
+ return 0;
+}
+
+static inline void hisi_usb_otg_bc_again(void)
+{
+}
+#endif /* CONFIG_USB_SUSB_HDRC || CONFIG_USB_DWC3 */
+
+static inline int hisi_usb_id_change(enum otg_dev_event_type event)
+{
+ if ((event == ID_FALL_EVENT) || (event == ID_RISE_EVENT))
+ return hisi_usb_otg_event(event);
+ else
+ return 0;
+}
+
+#endif /* _HISI_USB_H_*/
diff --git a/include/linux/hisi/usb/hub/hisi_hub.h b/include/linux/hisi/usb/hub/hisi_hub.h
new file mode 100644
index 000000000000..99df5772df96
--- /dev/null
+++ b/include/linux/hisi/usb/hub/hisi_hub.h
@@ -0,0 +1,24 @@
+/*
+ * hub_usb5734.h
+ *
+ * Copyright (c) Hisilicon Tech. Co., Ltd. All rights reserved.
+ *
+ * Chenjun <chenjun@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _HISI_HUB_H_
+#define _HISI_HUB_H_
+
+void gpio_hub_power_on(void);
+void gpio_hub_power_off(void);
+void gpio_hub_switch_to_hub(void);
+void gpio_hub_switch_to_typec(void);
+void gpio_hub_typec_power_off(void);
+void gpio_hub_typec_power_on(void);
+
+#endif /* _HISI_HUB_H_ */
diff --git a/include/linux/hisi/usb/pd/richtek/pd_core.h b/include/linux/hisi/usb/pd/richtek/pd_core.h
new file mode 100644
index 000000000000..8675f835fd52
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/pd_core.h
@@ -0,0 +1,1218 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PD_CORE_H_
+#define PD_CORE_H_
+#include <linux/module.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_timer.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_event.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_config.h>
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+#define CONFIG_PD_DISCOVER_CABLE_ID
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+#undef CONFIG_PD_DISCOVER_CABLE_ID
+#define CONFIG_PD_DISCOVER_CABLE_ID
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+
+#define PD_SOP_NR 3
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/* Default retry count for transmitting */
+#define PD_RETRY_COUNT 3
+
+#if PD_RETRY_COUNT > 3
+#error "PD_RETRY_COUNT Max = 3"
+#endif
+
+/* --- PD data message helpers --- */
+#define PDO_MAX_OBJECTS 7
+#define PDO_MODES (PDO_MAX_OBJECTS - 1)
+
+/* PDO : Power Data Object */
+/*
+ * 1. The vSafe5V Fixed Supply Object shall always be the first object.
+ * 2. The remaining Fixed Supply Objects,
+ * if present, shall be sent in voltage order; lowest to highest.
+ * 3. The Battery Supply Objects,
+ * if present shall be sent in Minimum Voltage order; lowest to highest.
+ * 4. The Variable Supply (non battery) Objects,
+ * if present, shall be sent in Minimum Voltage order; lowest to highest.
+ */
+#define PDO_TYPE_FIXED (0 << 30)
+#define PDO_TYPE_BATTERY BIT(30)
+#define PDO_TYPE_VARIABLE (2 << 30)
+#define PDO_TYPE_MASK (3 << 30)
+
+#define PDO_FIXED_DUAL_ROLE BIT(29) /* Dual role device */
+#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (SRC)*/
+#define PDO_FIXED_HIGH_CAP BIT(28) /* Higher Capability (SNK )*/
+#define PDO_FIXED_EXTERNAL BIT(27) /* Externally powered */
+#define PDO_FIXED_COMM_CAP BIT(26) /* USB Communications Capable */
+#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap command supported */
+
+#define PDO_FIXED_PEAK_CURR(i) \
+ (((i) & 0x03) << 20) /* [21..20] Peak current */
+#define PDO_FIXED_VOLT(mv) \
+ ((((mv) / 50) & 0x3ff) << 10) /* Voltage in 50mV units */
+#define PDO_FIXED_CURR(ma) \
+ ((((ma) / 10) & 0x3ff) << 0) /* Max current in 10mA units */
+
+#define PDO_TYPE(raw) ((raw) & PDO_TYPE_MASK)
+
+#define PDO_FIXED_EXTRACT_VOLT_RAW(raw) (((raw) >> 10) & 0x3ff)
+#define PDO_FIXED_EXTRACT_CURR_RAW(raw) (((raw) >> 0) & 0x3ff)
+#define PDO_FIXED_EXTRACT_VOLT(raw) (PDO_FIXED_EXTRACT_VOLT_RAW(raw) * 50)
+#define PDO_FIXED_EXTRACT_CURR(raw) (PDO_FIXED_EXTRACT_CURR_RAW(raw) * 10)
+#define PDO_FIXED_RESET_CURR(raw, ma) \
+ (((raw) & ~0x3ff) | PDO_FIXED_CURR(ma))
+
+#define PDO_FIXED(mv, ma, flags) (PDO_FIXED_VOLT(mv) |\
+ PDO_FIXED_CURR(ma) | (flags))
+
+#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & 0x3FF) << 20)
+#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & 0x3FF) << 10)
+#define PDO_VAR_OP_CURR(ma) ((((ma) / 10) & 0x3FF) << 0)
+
+#define PDO_VAR_EXTRACT_MAX_VOLT_RAW(raw) (((raw) >> 20) & 0x3ff)
+#define PDO_VAR_EXTRACT_MIN_VOLT_RAW(raw) (((raw) >> 10) & 0x3ff)
+#define PDO_VAR_EXTRACT_CURR_RAW(raw) (((raw) >> 0) & 0x3ff)
+
+#define PDO_VAR_EXTRACT_MAX_VOLT(raw) (PDO_VAR_EXTRACT_MAX_VOLT_RAW(raw) * 50)
+#define PDO_VAR_EXTRACT_MIN_VOLT(raw) (PDO_VAR_EXTRACT_MIN_VOLT_RAW(raw) * 50)
+#define PDO_VAR_EXTRACT_CURR(raw) (PDO_VAR_EXTRACT_CURR_RAW(raw) * 10)
+
+#define PDO_VAR_RESET_CURR(raw, ma) \
+ (((raw) & ~0x3ff) | PDO_VAR_OP_CURR(ma))
+
+#define PDO_VAR(min_mv, max_mv, op_ma) \
+ (PDO_VAR_MIN_VOLT(min_mv) | \
+ PDO_VAR_MAX_VOLT(max_mv) | \
+ PDO_VAR_OP_CURR(op_ma) | \
+ PDO_TYPE_VARIABLE)
+
+#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & 0x3FF) << 20)
+#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & 0x3FF) << 10)
+#define PDO_BATT_OP_POWER(mw) ((((mw) / 250) & 0x3FF) << 0)
+
+#define PDO_BATT_EXTRACT_MAX_VOLT_RAW(raw) (((raw) >> 20) & 0x3ff)
+#define PDO_BATT_EXTRACT_MIN_VOLT_RAW(raw) (((raw) >> 10) & 0x3ff)
+#define PDO_BATT_EXTRACT_OP_POWER_RAW(raw) (((raw) >> 0) & 0x3ff)
+
+#define PDO_BATT_EXTRACT_MAX_VOLT(raw) \
+ (PDO_BATT_EXTRACT_MAX_VOLT_RAW(raw) * 50)
+#define PDO_BATT_EXTRACT_MIN_VOLT(raw) \
+ (PDO_BATT_EXTRACT_MIN_VOLT_RAW(raw) * 50)
+#define PDO_BATT_EXTRACT_OP_POWER(raw) \
+ (PDO_BATT_EXTRACT_OP_POWER_RAW(raw) * 250)
+
+#define PDO_BATT(min_mv, max_mv, op_mw) \
+ (PDO_BATT_MIN_VOLT(min_mv) | \
+ PDO_BATT_MAX_VOLT(max_mv) | \
+ PDO_BATT_OP_POWER(op_mw) | \
+ PDO_TYPE_BATTERY)
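+
+/*
+ * Example encodings (illustrative only):
+ *
+ *   PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP)
+ *     vSafe5V fixed supply, 3 A, dual-role power, data-swap capable;
+ *     PDO_FIXED_EXTRACT_VOLT()/PDO_FIXED_EXTRACT_CURR() recover
+ *     5000 mV / 3000 mA from the resulting word.
+ *
+ *   PDO_VAR(3000, 12000, 1500)
+ *     variable supply, 3.0 V to 12.0 V at 1.5 A operating current.
+ */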
+
+/* RDO : Request Data Object */
+#define RDO_OBJ_POS(n) (((n) & 0x7) << 28)
+#define RDO_POS(rdo) (((rdo) >> 28) & 0x7)
+#define RDO_GIVE_BACK BIT(27)
+#define RDO_CAP_MISMATCH BIT(26)
+#define RDO_COMM_CAP BIT(25)
+#define RDO_NO_SUSPEND BIT(24)
+#define RDO_FIXED_VAR_OP_CURR(ma) ((((ma) / 10) & 0x3FF) << 10)
+#define RDO_FIXED_VAR_MAX_CURR(ma) ((((ma) / 10) & 0x3FF) << 0)
+
+#define RDO_FIXED_VAR_EXTRACT_OP_CURR(raw) ((((raw) >> 10 & 0x3ff)) * 10)
+#define RDO_FIXED_VAR_EXTRACT_MAX_CURR(raw) ((((raw) >> 0 & 0x3ff)) * 10)
+
+#define RDO_BATT_OP_POWER(mw) ((((mw) / 250) & 0x3FF) << 10)
+#define RDO_BATT_MAX_POWER(mw) ((((mw) / 250) & 0x3FF) << 0)
+
+#define RDO_BATT_EXTRACT_OP_POWER(raw) ((((raw) >> 10 & 0x3ff)) * 250)
+#define RDO_BATT_EXTRACT_MAX_POWER(raw) ((((raw) >> 0 & 0x3ff)) * 250)
+
+#define RDO_FIXED(n, op_ma, max_ma, flags) \
+ (RDO_OBJ_POS(n) | (flags) | \
+ RDO_FIXED_VAR_OP_CURR(op_ma) | \
+ RDO_FIXED_VAR_MAX_CURR(max_ma))
+
+#define RDO_BATT(n, op_mw, max_mw, flags) \
+ (RDO_OBJ_POS(n) | (flags) | \
+ RDO_BATT_OP_POWER(op_mw) | \
+ RDO_BATT_MAX_POWER(max_mw))
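+
+/*
+ * Example request encoding (illustrative only):
+ *
+ *   RDO_FIXED(1, 1500, 3000, RDO_COMM_CAP)
+ *     requests object position 1 (the vSafe5V PDO) with 1.5 A operating
+ *     and 3 A maximum current; RDO_POS() returns 1 and
+ *     RDO_FIXED_VAR_EXTRACT_OP_CURR() returns 1500 for that word.
+ */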
+
+/* BDO : BIST Data Object */
+#define BDO_MODE_RECV (0 << 28)
+#define BDO_MODE_TRANSMIT BIT(28)
+#define BDO_MODE_COUNTERS (2 << 28)
+#define BDO_MODE_CARRIER0 (3 << 28)
+#define BDO_MODE_CARRIER1 (4 << 28)
+#define BDO_MODE_CARRIER2 (5 << 28)
+#define BDO_MODE_CARRIER3 (6 << 28)
+#define BDO_MODE_EYE (7 << 28)
+#define BDO_MODE_TEST_DATA (8 << 28)
+
+#define BDO_MODE(obj) ((obj) & (0xf << 28))
+#define BDO(mode, cnt) ((mode) | ((cnt) & 0xFFFF))
+
+#define SVID_DISCOVERY_MAX 16
+
+/* Protocol revision */
+#define PD_REV10 0
+#define PD_REV20 1
+
+/* build message header */
+
+#define PD_HEADER_SOP(msg_type, prole, drole, id, cnt) \
+ ((msg_type) | (PD_REV20 << 6) | \
+ ((drole) << 5) | ((prole) << 8) | \
+ ((id) << 9) | ((cnt) << 12))
+
+#define PD_HEADER_SOP_PRIME(msg_type, cable_plug, id, cnt) \
+ ((msg_type) | (PD_REV20 << 6) | \
+ ((cable_plug) << 8) | \
+ ((id) << 9) | ((cnt) << 12))
+
+#define PD_HEADER_CNT(header) (((header) >> 12) & 7)
+#define PD_HEADER_TYPE(header) ((header) & 0xF)
+#define PD_HEADER_ID(header) (((header) >> 9) & 7)
+#define PD_HEADER_PR(header) (((header) >> 8) & 1)
+#define PD_HEADER_DR(header) (((header) >> 5) & 1)
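+
+/*
+ * Example (illustrative): a data message sent by a source/DFP with
+ * message id 2 and one data object is built as
+ *
+ *   PD_HEADER_SOP(msg_type, 1, 1, 2, 1)
+ *
+ * where msg_type is the message code from the PD specification (defined
+ * elsewhere, not in this header); PD_HEADER_ID()/PD_HEADER_CNT() recover
+ * 2 and 1 from the resulting header.
+ */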
+
+/*
+ * VDO : Vendor Defined Message Object
+ * VDM object is minimum of VDM header + 6 additional data objects.
+ */
+
+/*
+ * VDM header
+ * ----------
+ * <31:16> :: SVID
+ * <15> :: VDM type ( 1b == structured, 0b == unstructured )
+ * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
+ * <12:11> :: reserved
+ * <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
+ * <7:6> :: command type (SVDM only?)
+ * <5> :: reserved (SVDM), command type (UVDM)
+ * <4:0> :: command
+ */
+#define VDO_MAX_SIZE (7)
+#define VDO_MAX_DATA_SIZE (VDO_MAX_SIZE - 1)
+#define VDO_MAX_SVID_SIZE (VDO_MAX_DATA_SIZE * 2)
+
+#define VDO(vid, type, custom) \
+ (((vid) << 16) | \
+ ((type) << 15) | \
+ ((custom) & 0x7FFF))
+
+#define VDO_S(svid, cmd_type, cmd, obj) \
+ VDO(svid, 1, VDO_CMDT(cmd_type) | VDO_OPOS(obj) | (cmd))
+
+#define VDO_SVDM_TYPE BIT(15)
+#define VDO_SVDM_VERS(x) ((x) << 13)
+#define VDO_OPOS(x) ((x) << 8)
+#define VDO_CMDT(x) ((x) << 6)
+#define VDO_OPOS_MASK VDO_OPOS(0x7)
+#define VDO_CMDT_MASK VDO_CMDT(0x3)
+
+#define CMDT_INIT 0
+#define CMDT_RSP_ACK 1
+#define CMDT_RSP_NAK 2
+#define CMDT_RSP_BUSY 3
+
+/* reserved for SVDM ... for Google UVDM */
+#define VDO_SRC_INITIATOR (0 << 5)
+#define VDO_SRC_RESPONDER BIT(5)
+
+#define CMD_DISCOVER_IDENT 1
+#define CMD_DISCOVER_SVID 2
+#define CMD_DISCOVER_MODES 3
+#define CMD_ENTER_MODE 4
+#define CMD_EXIT_MODE 5
+#define CMD_ATTENTION 6
+#define CMD_DP_STATUS 16
+#define CMD_DP_CONFIG 17
+
+#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f))
+
+/* ChromeOS specific commands */
+#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
+#define VDO_CMD_SEND_INFO VDO_CMD_VENDOR(1)
+#define VDO_CMD_READ_INFO VDO_CMD_VENDOR(2)
+#define VDO_CMD_REBOOT VDO_CMD_VENDOR(5)
+#define VDO_CMD_FLASH_ERASE VDO_CMD_VENDOR(6)
+#define VDO_CMD_FLASH_WRITE VDO_CMD_VENDOR(7)
+#define VDO_CMD_ERASE_SIG VDO_CMD_VENDOR(8)
+#define VDO_CMD_PING_ENABLE VDO_CMD_VENDOR(10)
+#define VDO_CMD_CURRENT VDO_CMD_VENDOR(11)
+#define VDO_CMD_FLIP VDO_CMD_VENDOR(12)
+#define VDO_CMD_GET_LOG VDO_CMD_VENDOR(13)
+#define VDO_CMD_CCD_EN VDO_CMD_VENDOR(14)
+
+#define PD_VDO_VID(vdo) ((vdo) >> 16)
+#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
+#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
+#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
+#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
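+
+/*
+ * Example (illustrative): a structured Discover Identity request header
+ * for the PD SID (0xff00, defined below as USB_SID_PD):
+ *
+ *   VDO_S(0xff00, CMDT_INIT, CMD_DISCOVER_IDENT, 0)
+ *
+ * PD_VDO_VID()/PD_VDO_CMDT()/PD_VDO_CMD() recover 0xff00, CMDT_INIT and
+ * CMD_DISCOVER_IDENT from the resulting word.
+ */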
+
+/*
+ * SVDM Identity request -> response
+ *
+ * Request is simply a properly formatted SVDM header
+ *
+ * Response is 4 or 5 data objects:
+ * [0] :: SVDM header
+ * [1] :: Identity header
+ * [2] :: Cert Stat VDO
+ * [3] :: (Product | Cable) VDO
+ * [4] :: AMA VDO
+ *
+ */
+#define VDO_INDEX_HDR 0
+#define VDO_INDEX_IDH 1
+#define VDO_INDEX_CSTAT 2
+#define VDO_INDEX_CABLE 3
+#define VDO_INDEX_PRODUCT 3
+#define VDO_INDEX_AMA 4
+#define VDO_I(name) VDO_INDEX_##name
+
+/*
+ * SVDM Identity Header
+ * --------------------
+ * <31> :: data capable as a USB host
+ * <30> :: data capable as a USB device
+ * <29:27> :: product type
+ * <26> :: modal operation supported (1b == yes)
+ * <25:16> :: SBZ
+ * <15:0> :: USB-IF assigned VID for this cable vendor
+ */
+#define IDH_PTYPE_UNDEF 0
+#define IDH_PTYPE_HUB 1
+#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PCABLE 3
+#define IDH_PTYPE_ACABLE 4
+#define IDH_PTYPE_AMA 5
+
+#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
+ | (is_modal) << 26 | ((vid) & 0xffff))
+
+#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
+#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
+
+#define PD_IDH_MODAL_SUPPORT BIT(26)
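+
+/*
+ * Example (illustrative): identity header of a host- and device-capable
+ * Alternate Mode Adapter supporting modal operation, with a placeholder
+ * vendor id of 0x29cf:
+ *
+ *   VDO_IDH(1, 1, IDH_PTYPE_AMA, 1, 0x29cf)
+ *
+ * PD_IDH_PTYPE() returns IDH_PTYPE_AMA and PD_IDH_VID() returns 0x29cf
+ * for that word.
+ */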
+
+/*
+ * Cert Stat VDO
+ * -------------
+ * <31:20> : SBZ
+ * <19:0> : USB-IF assigned TID for this cable
+ */
+#define VDO_CSTAT(tid) ((tid) & 0xfffff)
+#define PD_CSTAT_TID(vdo) ((vdo) & 0xfffff)
+
+/*
+ * Product VDO
+ * -----------
+ * <31:16> : USB Product ID
+ * <15:0> : USB bcdDevice
+ */
+#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff))
+#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
+
+/*
+ * Cable VDO
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: SBZ
+ * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
+ * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
+#define CABLE_CTYPE 2
+#define CABLE_PLUG 0
+#define CABLE_RECEPTACLE 1
+#define CABLE_CURR_1A5 0
+#define CABLE_CURR_3A 1
+#define CABLE_CURR_5A 2
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d,\
+ tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
+ | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
+ | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_CABLE_CURR(x) (((x) >> 5) & 0x03)
+
+/*
+ * AMA VDO
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: SBZ
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x3) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
+ * SVDM Discover SVIDs request -> response
+ *
+ * Request is a properly formatted VDM Header with the Discover SVIDs command.
+ * Response is a set of all supported SVIDs, with all-zero entries marking the
+ * end of the list. If more than 12 SVIDs are supported, the command SHOULD be
+ * repeated.
+ */
+#define VDO_SVID(svid0, svid1) (((svid0) & 0xffff) << 16 | ((svid1) & 0xffff))
+#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
+#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
+
+/*
+ * Google modes capabilities
+ * <31:8> : reserved
+ * <7:0> : mode
+ */
+#define VDO_MODE_GOOGLE(mode) ((mode) & 0xff)
+
+#define MODE_GOOGLE_FU 1 /* Firmware Update mode */
+
+/*
+ * Mode Capabilities
+ *
+ * Number of VDOs supplied is SID dependent (but <= 6 VDOS?)
+ */
+#define VDO_MODE_CNT_DISPLAYPORT 1
+
+/*
+ * DisplayPort modes capabilities
+ * -------------------------------
+ * <31:24> : SBZ
+ * <23:16> : UFP_D pin assignment supported
+ * <15:8> : DFP_D pin assignment supported
+ * <7> : USB 2.0 signaling (0b=yes, 1b=no)
+ * <6> : Plug | Receptacle (0b == plug, 1b == receptacle)
+ * <5:2> : xxx1: Supports DPv1.3, xx1x Supports USB Gen 2 signaling
+ * Other bits are reserved.
+ * <1:0> : signal direction ( 00b=rsv, 01b=sink, 10b=src 11b=both )
+ */
+#define VDO_MODE_DP(snkp, srcp, usb, gdr, sign, sdir) \
+ (((snkp) & 0xff) << 16 | ((srcp) & 0xff) << 8 \
+ | ((usb) & 1) << 7 | ((gdr) & 1) << 6 | ((sign) & 0xF) << 2 \
+ | ((sdir) & 0x3))
+#define PD_DP_PIN_CAPS(x) ((((x) >> 6) & 0x1) ? (((x) >> 16) & 0x3f) \
+ : (((x) >> 8) & 0x3f))
+
+#define MODE_DP_PIN_A 0x01
+#define MODE_DP_PIN_B 0x02
+#define MODE_DP_PIN_C 0x04
+#define MODE_DP_PIN_D 0x08
+#define MODE_DP_PIN_E 0x10
+#define MODE_DP_PIN_F 0x20
+
+/* Pin configs B/D/F support multi-function */
+#define MODE_DP_PIN_MF_MASK 0x2a
+/* Pin configs A/B support BR2 signaling levels */
+#define MODE_DP_PIN_BR2_MASK 0x3
+/* Pin configs C/D/E/F support DP signaling levels */
+#define MODE_DP_PIN_DP_MASK 0x3c
+
+#define MODE_DP_V13 0x1
+#define MODE_DP_GEN2 0x2
+
+#define MODE_DP_SNK 0x1
+#define MODE_DP_SRC 0x2
+#define MODE_DP_BOTH 0x3
+
+#define MODE_DP_PORT_CAP(raw) ((raw) & 0x03)
+#define MODE_DP_SIGNAL_SUPPORT(raw) (((raw) >> 2) & 0x0f)
+#define MODE_DP_RECEPT(mode) (((mode) >> 6) & 0x01)
+
+#define MODE_DP_PIN_DFP(mode) (((mode) >> 8) & 0xff)
+#define MODE_DP_PIN_UFP(mode) (((mode) >> 16) & 0xff)
+
+#define PD_DP_DFP_D_PIN_CAPS(x) (MODE_DP_RECEPT(x) ? \
+ MODE_DP_PIN_DFP(x) : MODE_DP_PIN_UFP(x))
+
+#define PD_DP_UFP_D_PIN_CAPS(x) (MODE_DP_RECEPT(x) ? \
+ MODE_DP_PIN_UFP(x) : MODE_DP_PIN_DFP(x))
+
+/*
+ * DisplayPort Status VDO
+ * ----------------------
+ * <31:9> : SBZ
+ * <8> : IRQ_HPD : 1 == irq arrived since last message otherwise 0.
+ * <7> : HPD state : 0 = HPD_LOW, 1 == HPD_HIGH
+ * <6> : Exit DP Alt mode: 0 == maintain, 1 == exit
+ * <5> : USB config : 0 == maintain current, 1 == switch to USB from DP
+ * <4> : Multi-function preference : 0 == no pref, 1 == MF preferred.
+ * <3> : enabled : is DPout on/off.
+ * <2> : power low : 0 == normal or LPM disabled, 1 == DP disabled for LPM
+ * <1:0> : connect status : 00b == no (DFP|UFP)_D is connected or disabled.
+ * 01b == DFP_D connected, 10b == UFP_D connected, 11b == both.
+ */
+
+#define VDO_DP_STATUS(irq, lvl, amode, usbc, mf, en, lp, conn) \
+ (((irq) & 1) << 8 | ((lvl) & 1) << 7 | ((amode) & 1) << 6 \
+ | ((usbc) & 1) << 5 | ((mf) & 1) << 4 | ((en) & 1) << 3 \
+ | ((lp) & 1) << 2 | (((conn) & 0x3) << 0))
+
+#define PD_VDO_DPSTS_HPD_IRQ(x) (((x) >> 8) & 1)
+#define PD_VDO_DPSTS_HPD_LVL(x) (((x) >> 7) & 1)
+#define PD_VDO_DPSTS_MF_PREF(x) (((x) >> 4) & 1)
+
+#define PD_VDO_DPSTS_CONNECT(x) (((x) >> 0) & 0x03)
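+
+/*
+ * Example (illustrative): a UFP_D reporting HPD high, multi-function
+ * preferred, DP output enabled and UFP_D connected:
+ *
+ *   VDO_DP_STATUS(0, 1, 0, 0, 1, 1, 0, 2)
+ *
+ * PD_VDO_DPSTS_HPD_LVL() and PD_VDO_DPSTS_MF_PREF() both return 1 and
+ * PD_VDO_DPSTS_CONNECT() returns 2 (UFP_D connected) for that word.
+ */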
+
+#define DPSTS_DISCONNECT 0
+
+#define DPSTS_DFP_D_CONNECTED BIT(0)
+#define DPSTS_UFP_D_CONNECTED BIT(1)
+#define DPSTS_BOTH_CONNECTED (DPSTS_DFP_D_CONNECTED | DPSTS_UFP_D_CONNECTED)
+
+/* UFP_U only */
+#define DPSTS_DP_ENABLED BIT(3)
+#define DPSTS_DP_MF_PREF BIT(4)
+#define DPSTS_DP_USB_CONFIG BIT(5)
+#define DPSTS_DP_EXIT_ALT_MODE BIT(6)
+
+/* UFP_D only */
+#define DPSTS_DP_HPD_STATUS BIT(7)
+#define DPSTS_DP_HPD_IRQ BIT(8)
+
+/* Per DisplayPort Spec v1.3 Section 3.3 */
+#define HPD_USTREAM_DEBOUNCE_LVL (2 * MSEC)
+#define HPD_USTREAM_DEBOUNCE_IRQ (250)
+#define HPD_DSTREAM_DEBOUNCE_IRQ (750) /* between 500-1000us */
+
+/*
+ * DisplayPort Configure VDO
+ * -------------------------
+ * <31:24> : SBZ
+ * <23:16> : SBZ
+ * <15:8> : Pin assignment requested. Choose one from mode caps.
+ * <7:6> : SBZ
+ * <5:2> : signalling : 1h == DP v1.3, 2h == Gen 2
+ * 0h is only for USB, remaining values are reserved
+ * <1:0> : cfg : 00 == USB, 01 == DFP_D, 10 == UFP_D, 11 == reserved
+ */
+
+#define DP_CONFIG_USB 0
+#define DP_CONFIG_DFP_D 1
+#define DP_CONFIG_UFP_D 2
+
+#define VDO_DP_CFG(pin, sig, cfg) \
+ (((pin) & 0xff) << 8 | ((sig) & 0xf) << 2 | ((cfg) & 0x3))
+
+#define VDO_DP_DFP_CFG(pin, sig) VDO_DP_CFG(pin, sig, DP_CONFIG_DFP_D)
+#define VDO_DP_UFP_CFG(pin, sig) VDO_DP_CFG(pin, sig, DP_CONFIG_UFP_D)
+
+#define PD_DP_CFG_USB(x) (((x) & 0x3) == DP_CONFIG_USB)
+#define PD_DP_CFG_DFP_D(x) (((x) & 0x3) == DP_CONFIG_DFP_D)
+#define PD_DP_CFG_UFP_D(x) (((x) & 0x3) == DP_CONFIG_UFP_D)
+#define PD_DP_CFG_DPON(x) (PD_DP_CFG_DFP_D(x) | PD_DP_CFG_UFP_D(x))
+
+#define DP_SIG_DPV13 (0x01)
+#define DP_SIG_GEN2 (0x02)
+
+#define DP_PIN_ASSIGN_SUPPORT_A BIT(0)
+#define DP_PIN_ASSIGN_SUPPORT_B BIT(1)
+#define DP_PIN_ASSIGN_SUPPORT_C BIT(2)
+#define DP_PIN_ASSIGN_SUPPORT_D BIT(3)
+#define DP_PIN_ASSIGN_SUPPORT_E BIT(4)
+#define DP_PIN_ASSIGN_SUPPORT_F BIT(5)
+
+/*
+ * Get the pin assignment mask.
+ * For backward compatibility, if it is zero, fall back to the former sink
+ * pin assignment that used to be carried in <23:16>.
+ */
+
+#define PD_DP_CFG_PIN(x) (((x) >> 8) & 0xff)
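+
+/*
+ * Example (illustrative): configure the partner as UFP_D on pin
+ * assignment D with DP v1.3 signaling:
+ *
+ *   VDO_DP_UFP_CFG(DP_PIN_ASSIGN_SUPPORT_D, DP_SIG_DPV13)
+ *
+ * PD_DP_CFG_PIN() then returns DP_PIN_ASSIGN_SUPPORT_D and
+ * PD_DP_CFG_UFP_D() evaluates to true for the resulting word.
+ */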
+
+/*
+ * ChromeOS specific PD device Hardware IDs. Used to identify unique
+ * products and used in VDO_INFO. Note this field is 10 bits.
+ */
+#define USB_PD_HW_DEV_ID_RESERVED 0
+#define USB_PD_HW_DEV_ID_ZINGER 1
+#define USB_PD_HW_DEV_ID_MINIMUFFIN 2
+#define USB_PD_HW_DEV_ID_DINGDONG 3
+#define USB_PD_HW_DEV_ID_HOHO 4
+#define USB_PD_HW_DEV_ID_HONEYBUNS 5
+
+/*
+ * ChromeOS specific VDO_CMD_READ_INFO responds with device info including:
+ * RW Hash: First 20 bytes of SHA-256 of RW (20 bytes)
+ * HW Device ID: unique descriptor for each ChromeOS model (2 bytes)
+ * top 6 bits are minor revision, bottom 10 bits are major
+ * SW Debug Version: Software version useful for debugging (15 bits)
+ * IS RW: True if currently in RW, False otherwise (1 bit)
+ */
+#define VDO_INFO(id, id_minor, ver, is_rw) ((id_minor) << 26 \
+ | ((id) & 0x3ff) << 16 \
+ | ((ver) & 0x7fff) << 1 \
+ | ((is_rw) & 1))
+#define VDO_INFO_HW_DEV_ID(x) ((x) >> 16)
+#define VDO_INFO_SW_DBG_VER(x) (((x) >> 1) & 0x7fff)
+#define VDO_INFO_IS_RW(x) ((x) & 1)
+
+#define HW_DEV_ID_MAJ(x) ((x) & 0x3ff)
+#define HW_DEV_ID_MIN(x) ((x) >> 10)
+
+/* USB-IF SIDs */
+#define USB_SID_PD 0xff00 /* power delivery */
+#define USB_SID_DISPLAYPORT 0xff01 /* display port */
+#define USB_SID_RICHTEK 0x29cf /* demo uvdm */
+#define USB_SID_DIRECTCHARGE 0x29cf /* direct charge */
+
+/* DPM Flags */
+
+#define DPM_FLAGS_PARTNER_DR_POWER BIT(0)
+#define DPM_FLAGS_PARTNER_DR_DATA BIT(1)
+#define DPM_FLAGS_PARTNER_EXTPOWER BIT(2)
+#define DPM_FLAGS_PARTNER_USB_COMM BIT(3)
+#define DPM_FLAGS_PARTNER_USB_SUSPEND BIT(4)
+#define DPM_FLAGS_PARTNER_HIGH_CAP BIT(5)
+
+#define DPM_FLAGS_PARTNER_MISMATCH BIT(7)
+#define DPM_FLAGS_PARTNER_GIVE_BACK BIT(8)
+#define DPM_FLAGS_PARTNER_NO_SUSPEND BIT(9)
+
+#define DPM_FLAGS_RESET_PARTNER_MASK \
+ (DPM_FLAGS_PARTNER_DR_POWER | DPM_FLAGS_PARTNER_DR_DATA | \
+ DPM_FLAGS_PARTNER_EXTPOWER | DPM_FLAGS_PARTNER_USB_COMM)
+
+#define DPM_FLAGS_CHECK_DC_MODE BIT(20)
+#define DPM_FLAGS_CHECK_UFP_SVID BIT(21)
+#define DPM_FLAGS_CHECK_EXT_POWER BIT(22)
+#define DPM_FLAGS_CHECK_DP_MODE BIT(23)
+#define DPM_FLAGS_CHECK_SINK_CAP BIT(24)
+#define DPM_FLAGS_CHECK_SOURCE_CAP BIT(25)
+#define DPM_FLAGS_CHECK_UFP_ID BIT(26)
+#define DPM_FLAGS_CHECK_CABLE_ID BIT(27)
+#define DPM_FLAGS_CHECK_CABLE_ID_DFP BIT(28)
+#define DPM_FLAGS_CHECK_PR_ROLE BIT(29)
+#define DPM_FLAGS_CHECK_DR_ROLE BIT(30)
+
+/* DPM_CAPS */
+
+#define DPM_CAP_LOCAL_DR_POWER BIT(0)
+#define DPM_CAP_LOCAL_DR_DATA BIT(1)
+#define DPM_CAP_LOCAL_EXT_POWER BIT(2)
+#define DPM_CAP_LOCAL_USB_COMM BIT(3)
+#define DPM_CAP_LOCAL_USB_SUSPEND BIT(4)
+#define DPM_CAP_LOCAL_HIGH_CAP BIT(5)
+#define DPM_CAP_LOCAL_GIVE_BACK BIT(6)
+#define DPM_CAP_LOCAL_NO_SUSPEND BIT(7)
+#define DPM_CAP_LOCAL_VCONN_SUPPLY BIT(8)
+
+#define DPM_CAP_ATTEMP_ENTER_DC_MODE BIT(11)
+#define DPM_CAP_ATTEMP_DISCOVER_CABLE_DFP BIT(12)
+#define DPM_CAP_ATTEMP_ENTER_DP_MODE BIT(13)
+#define DPM_CAP_ATTEMP_DISCOVER_CABLE BIT(14)
+#define DPM_CAP_ATTEMP_DISCOVER_ID BIT(15)
+
+enum dpm_cap_pr_check_prefer {
+ DPM_CAP_PR_CHECK_DISABLE = 0,
+ DPM_CAP_PR_CHECK_PREFER_SNK = 1,
+ DPM_CAP_PR_CHECK_PREFER_SRC = 2,
+};
+
+#define DPM_CAP_PR_CHECK_PROP(cap) (((cap) & 0x03) << 16)
+#define DPM_CAP_EXTRACT_PR_CHECK(raw) (((raw) >> 16) & 0x03)
+#define DPM_CAP_PR_SWAP_REJECT_AS_SRC BIT(18)
+#define DPM_CAP_PR_SWAP_REJECT_AS_SNK BIT(19)
+#define DPM_CAP_PR_SWAP_CHECK_GP_SRC BIT(20)
+#define DPM_CAP_PR_SWAP_CHECK_GP_SNK BIT(21)
+#define DPM_CAP_PR_SWAP_CHECK_GOOD_POWER \
+ (DPM_CAP_PR_SWAP_CHECK_GP_SRC | DPM_CAP_PR_SWAP_CHECK_GP_SNK)
+
+enum dpm_cap_dr_check_prefer {
+ DPM_CAP_DR_CHECK_DISABLE = 0,
+ DPM_CAP_DR_CHECK_PREFER_UFP = 1,
+ DPM_CAP_DR_CHECK_PREFER_DFP = 2,
+};
+
+#define DPM_CAP_DR_CHECK_PROP(cap) (((cap) & 0x03) << 22)
+#define DPM_CAP_EXTRACT_DR_CHECK(raw) (((raw) >> 22) & 0x03)
+#define DPM_CAP_DR_SWAP_REJECT_AS_DFP BIT(24)
+#define DPM_CAP_DR_SWAP_REJECT_AS_UFP BIT(25)
+
+#define DPM_CAP_DP_PREFER_MF BIT(29)
+#define DPM_CAP_SNK_PREFER_LOW_VOLTAGE BIT(30)
+#define DPM_CAP_SNK_IGNORE_MISMATCH_CURRENT BIT(31)
+
+/* PD counter definitions */
+#define PD_MESSAGE_ID_COUNT 7
+#define PD_HARD_RESET_COUNT 2
+#define PD_CAPS_COUNT 50
+#define PD_GET_SNK_CAP_RETRIES 3
+#define PD_GET_SRC_CAP_RETRIES 3
+#define PD_DISCOVER_ID_COUNT 3 /* max : 20 */
+
+enum {
+ PD_WAIT_VBUS_DISABLE = 0,
+ PD_WAIT_VBUS_VALID_ONCE = 1,
+ PD_WAIT_VBUS_INVALID_ONCE = 2,
+ PD_WAIT_VBUS_SAFE0V_ONCE = 3,
+ PD_WAIT_VBUS_STABLE_ONCE = 4,
+};
+
+typedef struct __pd_port_power_cababilities {
+ u8 nr;
+ u32 pdos[7];
+} pd_port_power_caps;
+
+#define PD_SVID_DATA_NR 2 /* must be < 11 */
+
+typedef struct __svdm_mode {
+ u8 mode_cnt;
+ u32 mode_vdo[VDO_MAX_DATA_SIZE];
+} svdm_mode_t;
+
+struct __svdm_svid_ops;
+typedef struct __svdm_svid_data {
+ bool exist;
+ u16 svid;
+ u8 active_mode;
+ svdm_mode_t local_mode;
+ svdm_mode_t remote_mode;
+ const struct __svdm_svid_ops *ops;
+} svdm_svid_data_t;
+
+typedef struct __svdm_svid_list {
+ u8 cnt;
+ u16 svids[VDO_MAX_SVID_SIZE];
+} svdm_svid_list_t;
+
+typedef struct __pd_port {
+ struct tcpc_device *tcpc_dev;
+ struct mutex pd_lock;
+
+ /* PD */
+ bool explicit_contract;
+ bool invalid_contract;
+ bool vconn_source;
+
+#ifdef CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+ bool vconn_return;
+#endif /* CONFIG_USB_PD_DFP_READY_DISCOVER_ID */
+
+ bool pe_ready;
+ bool pd_connected;
+ bool pd_prev_connected;
+ bool msg_output_lock;
+
+ u8 state_machine;
+ u8 pd_connect_state;
+
+ bool reset_vdm_state;
+ u8 pe_pd_state;
+ u8 pe_vdm_state;
+
+ u8 pe_state_next;
+ u8 pe_state_curr;
+
+ u8 data_role;
+ u8 power_role;
+
+ u8 cap_counter;
+ u8 discover_id_counter;
+ u8 hard_reset_counter;
+ u8 snk_cap_count;
+ u8 src_cap_count;
+ u8 get_snk_cap_count;
+ u8 get_src_cap_count;
+
+ u8 msg_id_rx[PD_SOP_NR];
+ u8 msg_id_rx_init[PD_SOP_NR];
+ u8 msg_id_tx[PD_SOP_NR];
+
+ u32 last_rdo;
+ u32 cable_vdos[VDO_MAX_SIZE];
+ bool power_cable_present;
+
+ u8 id_vdo_nr;
+ u32 id_vdos[VDO_MAX_DATA_SIZE];
+
+#ifdef CONFIG_USB_PD_KEEP_SVIDS
+ svdm_svid_list_t remote_svid_list;
+#endif
+
+ u8 svid_data_cnt;
+ svdm_svid_data_t svid_data[PD_SVID_DATA_NR];
+
+ bool during_swap; /* pr or dr swap */
+
+/* DPM */
+ int request_v;
+ int request_i;
+ int request_v_new;
+ int request_i_new;
+ int request_i_op;
+ int request_i_max;
+
+ u8 local_selected_cap;
+ u8 remote_selected_cap;
+ pd_port_power_caps local_src_cap;
+ pd_port_power_caps local_snk_cap;
+ pd_port_power_caps local_src_cap_default;
+ pd_port_power_caps remote_src_cap;
+ pd_port_power_caps remote_snk_cap;
+
+ u16 mode_svid;
+ u8 mode_obj_pos;
+ bool modal_operation;
+ bool dpm_ack_immediately;
+
+ u32 dpm_flags;
+ u32 dpm_init_flags;
+ u32 dpm_caps;
+ u32 dpm_dfp_retry_cnt;
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ bool custom_dbgacc;
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+} pd_port_t;
+
+int pd_core_init(struct tcpc_device *tcpc_dev);
+int pd_alert_vbus_changed(pd_port_t *pd_port, int vbus_level);
+
+static inline int pd_is_auto_discover_cable_id(pd_port_t *pd_port)
+{
+ if (pd_port->dpm_flags & DPM_FLAGS_CHECK_CABLE_ID) {
+ if (pd_port->discover_id_counter < PD_DISCOVER_ID_COUNT)
+ return true;
+
+ pd_port->dpm_flags &= ~DPM_FLAGS_CHECK_CABLE_ID;
+ return false;
+ }
+
+ return false;
+}
+
+static inline int pd_is_support_modal_operation(pd_port_t *pd_port)
+{
+ if (!(pd_port->id_vdos[0] & PD_IDH_MODAL_SUPPORT))
+ return false;
+
+ return (pd_port->svid_data_cnt > 0);
+}
+
+/* new definitions*/
+
+#define PD_RX_CAP_PE_IDLE (0)
+#define PD_RX_CAP_PE_DISABLE (TCPC_RX_CAP_HARD_RESET)
+#define PD_RX_CAP_PE_STARTUP (TCPC_RX_CAP_HARD_RESET)
+#define PD_RX_CAP_PE_HARDRESET (0)
+#define PD_RX_CAP_PE_SEND_WAIT_CAP \
+ (TCPC_RX_CAP_HARD_RESET | TCPC_RX_CAP_SOP)
+#define PD_RX_CAP_PE_DISCOVER_CABLE \
+ (TCPC_RX_CAP_HARD_RESET | TCPC_RX_CAP_SOP_PRIME)
+#define PD_RX_CAP_PE_READY_UFP \
+ (TCPC_RX_CAP_HARD_RESET | TCPC_RX_CAP_SOP)
+
+#ifdef CONFIG_PD_DISCOVER_CABLE_ID
+#define PD_RX_CAP_PE_READY_DFP \
+ (TCPC_RX_CAP_HARD_RESET | TCPC_RX_CAP_SOP | TCPC_RX_CAP_SOP_PRIME)
+#else
+#define PD_RX_CAP_PE_READY_DFP (TCPC_RX_CAP_HARD_RESET | TCPC_RX_CAP_SOP)
+#endif
+
+enum {
+ PD_BIST_MODE_DISABLE = 0,
+ PD_BIST_MODE_EVENT_PENDING,
+ PD_BIST_MODE_TEST_DATA,
+};
+
+void pd_reset_svid_data(pd_port_t *pd_port);
+int pd_reset_protocol_layer(pd_port_t *pd_port);
+
+int pd_set_rx_enable(pd_port_t *pd_port, u8 enable);
+
+int pd_enable_vbus_valid_detection(pd_port_t *pd_port, bool wait_valid);
+int pd_enable_vbus_safe0v_detection(pd_port_t *pd_port);
+int pd_enable_vbus_stable_detection(pd_port_t *pd_port);
+
+u32 pd_reset_pdo_power(u32 pdo, u32 imax);
+
+void pd_extract_rdo_power(
+ u32 rdo, u32 pdo, u32 *op_curr,
+ u32 *max_curr);
+
+void pd_extract_pdo_power(u32 pdo,
+ u32 *vmin, u32 *vmax, u32 *ioper);
+
+u32 pd_extract_cable_curr(u32 vdo);
+
+int pd_set_data_role(pd_port_t *pd_port, u8 dr);
+int pd_set_power_role(pd_port_t *pd_port, u8 pr);
+int pd_init_role(pd_port_t *pd_port, u8 pr, u8 dr, bool vr);
+
+int pd_set_cc_res(pd_port_t *pd_port, int pull);
+int pd_set_vconn(pd_port_t *pd_port, int enable);
+int pd_reset_local_hw(pd_port_t *pd_port);
+
+int pd_enable_bist_test_mode(pd_port_t *pd_port, bool en);
+
+void pd_lock_msg_output(pd_port_t *pd_port);
+void pd_unlock_msg_output(pd_port_t *pd_port);
+
+int pd_update_connect_state(pd_port_t *pd_port, u8 state);
+void pd_update_dpm_request_state(pd_port_t *pd_port, u8 state);
+
+/* ---- PD notify TCPC Policy Engine State Changed ---- */
+
+void pd_try_put_pe_idle_event(pd_port_t *pd_port);
+void pd_notify_pe_transit_to_default(pd_port_t *pd_port);
+void pd_notify_pe_hard_reset_completed(pd_port_t *pd_port);
+void pd_notify_pe_send_hard_reset(pd_port_t *pd_port);
+void pd_notify_pe_idle(pd_port_t *pd_port);
+void pd_notify_pe_wait_vbus_once(pd_port_t *pd_port, int wait_evt);
+void pd_notify_pe_error_recovery(pd_port_t *pd_port);
+void pd_notify_pe_execute_pr_swap(pd_port_t *pd_port, bool start_swap);
+void pd_notify_pe_cancel_pr_swap(pd_port_t *pd_port);
+void pd_notify_pe_reset_protocol(pd_port_t *pd_port);
+void pd_noitfy_pe_bist_mode(pd_port_t *pd_port, u8 mode);
+void pd_notify_pe_pr_changed(pd_port_t *pd_port);
+void pd_notify_pe_src_explicit_contract(pd_port_t *pd_port);
+void pd_notify_pe_transmit_msg(pd_port_t *pd_port, u8 type);
+void pd_notify_pe_recv_ping_event(pd_port_t *pd_port);
+
+/* ---- pd_timer ---- */
+
+static inline void pd_restart_timer(pd_port_t *pd_port, u32 timer_id)
+{
+ return tcpc_restart_timer(pd_port->tcpc_dev, timer_id);
+}
+
+static inline void pd_enable_timer(pd_port_t *pd_port, u32 timer_id)
+{
+ return tcpc_enable_timer(pd_port->tcpc_dev, timer_id);
+}
+
+static inline void pd_disable_timer(pd_port_t *pd_port, u32 timer_id)
+{
+ return tcpc_disable_timer(pd_port->tcpc_dev, timer_id);
+}
+
+static inline void pd_reset_pe_timer(pd_port_t *pd_port)
+{
+ tcpc_reset_pe_timer(pd_port->tcpc_dev);
+}
+
+/* ---- pd_event ---- */
+
+static inline void pd_free_pd_event(pd_port_t *pd_port, pd_event_t *pd_event)
+{
+ pd_free_event(pd_port->tcpc_dev, pd_event);
+}
+
+static inline bool pd_put_pe_event(pd_port_t *pd_port, u8 pe_event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_PE_MSG,
+ .msg = pe_event,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool pd_put_dpm_event(pd_port_t *pd_port, u8 event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = event,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool pd_put_dpm_pd_request_event(
+ pd_port_t *pd_port, u8 event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_PD_REQUEST,
+ .msg_sec = event,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool vdm_put_dpm_vdm_request_event(
+ pd_port_t *pd_port, u8 event)
+{
+ bool ret;
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_VDM_REQUEST,
+ .msg_sec = event,
+ .pd_msg = NULL,
+ };
+
+ ret = pd_put_vdm_event(pd_port->tcpc_dev, &evt, false);
+
+ if (ret) {
+ pd_port->reset_vdm_state = true;
+ pd_port->pe_vdm_state = pd_port->pe_pd_state;
+ }
+
+ return ret;
+}
+
+static inline bool pd_put_dpm_notify_event(pd_port_t *pd_port, u8 notify)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_NOTIFIED,
+ .msg_sec = notify,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool pd_put_dpm_ack_event(pd_port_t *pd_port)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_ACK,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool pd_put_dpm_nak_event(pd_port_t *pd_port, u8 notify)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = PD_DPM_NAK,
+ .msg_sec = notify,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool vdm_put_hw_event(
+ struct tcpc_device *tcpc_dev, u8 hw_event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_HW_MSG,
+ .msg = hw_event,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_vdm_event(tcpc_dev, &evt, false);
+}
+
+static inline bool vdm_put_dpm_event(
+ pd_port_t *pd_port, u8 dpm_event, pd_msg_t *pd_msg)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_DPM_MSG,
+ .msg = dpm_event,
+ .pd_msg = pd_msg,
+ };
+
+ return pd_put_vdm_event(pd_port->tcpc_dev, &evt, false);
+}
+
+static inline bool vdm_put_dpm_notified_event(pd_port_t *pd_port)
+{
+ return vdm_put_dpm_event(pd_port, PD_DPM_NOTIFIED, NULL);
+}
+
+static inline bool vdm_put_dpm_discover_cable_event(pd_port_t *pd_port)
+{
+ pd_port->pe_vdm_state = pd_port->pe_pd_state;
+ return vdm_put_dpm_event(pd_port, PD_DPM_DISCOVER_CABLE_ID, NULL);
+}
+
+static inline bool pd_put_hw_event(
+ struct tcpc_device *tcpc_dev, u8 hw_event)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_HW_MSG,
+ .msg = hw_event,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(tcpc_dev, &evt, false);
+}
+
+static inline bool pd_put_cc_attached_event(
+ struct tcpc_device *tcpc_dev, u8 type)
+{
+ pd_event_t evt = {
+ .event_type = PD_EVT_HW_MSG,
+ .msg = PD_HW_CC_ATTACHED,
+ .msg_sec = type,
+ .pd_msg = NULL,
+ };
+
+ return pd_put_event(tcpc_dev, &evt, false);
+}
+
+/* ---- Handle PD Message ----*/
+
+int pd_handle_soft_reset(pd_port_t *pd_port, u8 state_machine);
+
+/* ---- Send PD Message ----*/
+
+int pd_send_ctrl_msg(
+ pd_port_t *pd_port, u8 sop_type, u8 msg);
+
+int pd_send_data_msg(pd_port_t *pd_port,
+ u8 sop_type, u8 msg, u8 cnt, u32 *payload);
+
+int pd_send_soft_reset(pd_port_t *pd_port, u8 state_machine);
+int pd_send_hard_reset(pd_port_t *pd_port);
+
+int pd_send_bist_mode2(pd_port_t *pd_port);
+int pd_disable_bist_mode2(pd_port_t *pd_port);
+
+/* ---- Send / Reply SVDM Command ----*/
+
+/* Auto enable pd_timer_vdm_response if success */
+int pd_send_svdm_request(pd_port_t *pd_port,
+ u8 sop_type, u16 svid, u8 vdm_cmd,
+ u8 obj_pos, u8 cnt, u32 *data_obj);
+
+int pd_reply_svdm_request(pd_port_t *pd_port,
+ pd_event_t *pd_event, u8 reply,
+ u8 cnt, u32 *data_obj);
+
+static inline int pd_send_vdm_discover_id(
+ pd_port_t *pd_port, u8 sop_type)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, USB_SID_PD, CMD_DISCOVER_IDENT, 0, 0, NULL);
+}
+
+static inline int pd_send_vdm_discover_svids(
+ pd_port_t *pd_port, u8 sop_type)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, USB_SID_PD, CMD_DISCOVER_SVID, 0, 0, NULL);
+}
+
+static inline int pd_send_vdm_discover_modes(
+ pd_port_t *pd_port, u8 sop_type, u16 svid)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, svid, CMD_DISCOVER_MODES, 0, 0, NULL);
+}
+
+static inline int pd_send_vdm_enter_mode(
+ pd_port_t *pd_port, u8 sop_type, u16 svid, u8 obj_pos)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, svid, CMD_ENTER_MODE, obj_pos, 0, NULL);
+}
+
+static inline int pd_send_vdm_exit_mode(
+ pd_port_t *pd_port, u8 sop_type, u16 svid, u8 obj_pos)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, svid, CMD_EXIT_MODE, obj_pos, 0, NULL);
+}
+
+static inline int pd_send_vdm_attention(
+ pd_port_t *pd_port, u8 sop_type, u16 svid, u8 obj_pos)
+{
+ return pd_send_svdm_request(
+ pd_port, sop_type, svid, CMD_ATTENTION, obj_pos, 0, NULL);
+}
+
+static inline int pd_send_vdm_dp_attention(pd_port_t *pd_port,
+ u8 sop_type, u8 obj_pos,
+ u32 dp_status)
+{
+ return pd_send_svdm_request(pd_port, sop_type,
+ USB_SID_DISPLAYPORT, CMD_ATTENTION, obj_pos, 1, &dp_status);
+}
+
+static inline int pd_send_vdm_dp_status(pd_port_t *pd_port,
+ u8 sop_type, u8 obj_pos,
+ u8 cnt, u32 *data_obj)
+{
+ return pd_send_svdm_request(pd_port, sop_type,
+ USB_SID_DISPLAYPORT, CMD_DP_STATUS, obj_pos, cnt, data_obj);
+}
+
+static inline int pd_send_vdm_dp_config(pd_port_t *pd_port,
+ u8 sop_type, u8 obj_pos,
+ u8 cnt, u32 *data_obj)
+{
+ return pd_send_svdm_request(pd_port, sop_type,
+ USB_SID_DISPLAYPORT, CMD_DP_CONFIG, obj_pos, cnt, data_obj);
+}
+
+static inline int pd_reply_svdm_request_simply(
+ pd_port_t *pd_port, pd_event_t *pd_event, u8 reply)
+{
+ return pd_reply_svdm_request(pd_port, pd_event, reply, 0, NULL);
+}
+#endif /* PD_CORE_H_ */
diff --git a/include/linux/hisi/usb/pd/richtek/pd_dpm_core.h b/include/linux/hisi/usb/pd/richtek/pd_dpm_core.h
new file mode 100644
index 000000000000..61ea34047a55
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/pd_dpm_core.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PD_DPM_CORE_H
+#define PD_DPM_CORE_H
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+
+/* ---- MISC ---- */
+int pd_dpm_enable_vconn(pd_port_t *pd_port, bool en);
+int pd_dpm_send_sink_caps(pd_port_t *pd_port);
+int pd_dpm_send_source_caps(pd_port_t *pd_port);
+
+/* ---- SNK ---- */
+
+bool pd_dpm_send_request(pd_port_t *pd_port, int mv, int ma);
+
+void pd_dpm_snk_evaluate_caps(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_snk_transition_power(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_snk_hard_reset(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- SRC ---- */
+
+void pd_dpm_src_evaluate_request(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_src_transition_power(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_src_hard_reset(pd_port_t *pd_port);
+void pd_dpm_src_inform_cable_vdo(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- UFP : Evaluate VDM Request ---- */
+
+void pd_dpm_ufp_request_id_info(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_ufp_request_svid_info(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_ufp_request_mode_info(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_ufp_request_enter_mode(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_ufp_request_exit_mode(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- UFP : Response VDM Request ---- */
+
+int pd_dpm_ufp_response_id(pd_port_t *pd_port, pd_event_t *pd_event);
+int pd_dpm_ufp_response_svids(pd_port_t *pd_port, pd_event_t *pd_event);
+int pd_dpm_ufp_response_modes(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- DFP : Inform VDM Result ---- */
+
+void pd_dpm_dfp_inform_id(pd_port_t *pd_port, pd_event_t *pd_event, bool ack);
+void pd_dpm_dfp_inform_svids(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack);
+void pd_dpm_dfp_inform_modes(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack);
+void pd_dpm_dfp_inform_enter_mode(
+ pd_port_t *pd_port, pd_event_t *pd_event, bool ack);
+void pd_dpm_dfp_inform_exit_mode(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_dfp_inform_attention(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pd_dpm_dfp_inform_cable_vdo(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- DRP : Inform PowerCap ---- */
+
+void pd_dpm_dr_inform_sink_cap(pd_port_t *pd_port, pd_event_t *pd_event);
+void pd_dpm_dr_inform_source_cap(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- DRP : Data Role Swap ---- */
+
+void pd_dpm_drs_evaluate_swap(pd_port_t *pd_port, uint8_t role);
+void pd_dpm_drs_change_role(pd_port_t *pd_port, uint8_t role);
+
+/* ---- DRP : Power Role Swap ---- */
+
+void pd_dpm_prs_evaluate_swap(pd_port_t *pd_port, uint8_t role);
+void pd_dpm_prs_turn_off_power_sink(pd_port_t *pd_port);
+void pd_dpm_prs_enable_power_source(pd_port_t *pd_port, bool en);
+void pd_dpm_prs_change_role(pd_port_t *pd_port, uint8_t role);
+
+/* ---- DRP : Vconn Swap ---- */
+
+void pd_dpm_vcs_evaluate_swap(pd_port_t *pd_port);
+void pd_dpm_vcs_enable_vconn(pd_port_t *pd_port, bool en);
+
+/* PE : Notify DPM */
+
+int pd_dpm_notify_pe_startup(pd_port_t *pd_port);
+int pd_dpm_notify_pe_hardreset(pd_port_t *pd_port);
+int pd_dpm_notify_pe_ready(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* TCPCI - VBUS Control */
+
+static inline int pd_dpm_check_vbus_valid(pd_port_t *pd_port)
+{
+ return tcpci_check_vbus_valid(pd_port->tcpc_dev);
+}
+
+static inline int pd_dpm_sink_vbus(pd_port_t *pd_port, bool en)
+{
+ int mv = en ? TCPC_VBUS_SINK_5V : TCPC_VBUS_SINK_0V;
+
+ return tcpci_sink_vbus(pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_REQUEST, mv, -1);
+}
+
+static inline int pd_dpm_source_vbus(pd_port_t *pd_port, bool en)
+{
+ int mv = en ? TCPC_VBUS_SOURCE_5V : TCPC_VBUS_SOURCE_0V;
+
+ return tcpci_source_vbus(pd_port->tcpc_dev,
+ TCP_VBUS_CTRL_REQUEST, mv, -1);
+}
+#endif /* PD_DPM_CORE_H */
diff --git a/include/linux/hisi/usb/pd/richtek/pd_policy_engine.h b/include/linux/hisi/usb/pd/richtek/pd_policy_engine.h
new file mode 100644
index 000000000000..fe4593a56943
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/pd_policy_engine.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PD_POLICY_ENGINE_H_
+#define PD_POLICY_ENGINE_H_
+
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+
+/* ---- Policy Engine State ---- */
+
+enum pd_pe_state_machine {
+ PE_STATE_MACHINE_IDLE = 0,
+ PE_STATE_MACHINE_SINK,
+ PE_STATE_MACHINE_SOURCE,
+ PE_STATE_MACHINE_DR_SWAP,
+ PE_STATE_MACHINE_PR_SWAP,
+ PE_STATE_MACHINE_VCONN_SWAP,
+ PE_STATE_MACHINE_DBGACC,
+};
+
+enum pd_pe_state {
+ PE_SRC_STARTUP = 0,
+ PE_SRC_DISCOVERY,
+ PE_SRC_SEND_CAPABILITIES,
+ PE_SRC_NEGOTIATE_CAPABILITIES,
+ PE_SRC_TRANSITION_SUPPLY,
+ PE_SRC_TRANSITION_SUPPLY2,
+ PE_SRC_READY,
+ PE_SRC_DISABLED,
+ PE_SRC_CAPABILITY_RESPONSE,
+ PE_SRC_HARD_RESET,
+ PE_SRC_HARD_RESET_RECEIVED,
+ PE_SRC_TRANSITION_TO_DEFAULT,
+ PE_SRC_GIVE_SOURCE_CAP,
+ PE_SRC_GET_SINK_CAP,
+ PE_SRC_WAIT_NEW_CAPABILITIES,
+
+ PE_SRC_SEND_SOFT_RESET,
+ PE_SRC_SOFT_RESET,
+ PE_SRC_PING,
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+ PE_SRC_VDM_IDENTITY_REQUEST,
+ PE_SRC_VDM_IDENTITY_ACKED,
+ PE_SRC_VDM_IDENTITY_NAKED,
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+ PE_SNK_STARTUP,
+ PE_SNK_DISCOVERY,
+ PE_SNK_WAIT_FOR_CAPABILITIES,
+ PE_SNK_EVALUATE_CAPABILITY,
+ PE_SNK_SELECT_CAPABILITY,
+ PE_SNK_TRANSITION_SINK,
+ PE_SNK_READY,
+ PE_SNK_HARD_RESET,
+ PE_SNK_TRANSITION_TO_DEFAULT,
+ PE_SNK_GIVE_SINK_CAP,
+ PE_SNK_GET_SOURCE_CAP,
+
+ PE_SNK_SEND_SOFT_RESET,
+ PE_SNK_SOFT_RESET,
+
+ PE_DRS_DFP_UFP_EVALUATE_DR_SWAP,
+ PE_DRS_DFP_UFP_ACCEPT_DR_SWAP,
+ PE_DRS_DFP_UFP_CHANGE_TO_UFP,
+ PE_DRS_DFP_UFP_SEND_DR_SWAP,
+ PE_DRS_DFP_UFP_REJECT_DR_SWAP,
+
+ PE_DRS_UFP_DFP_EVALUATE_DR_SWAP,
+ PE_DRS_UFP_DFP_ACCEPT_DR_SWAP,
+ PE_DRS_UFP_DFP_CHANGE_TO_DFP,
+ PE_DRS_UFP_DFP_SEND_DR_SWAP,
+ PE_DRS_UFP_DFP_REJECT_DR_SWAP,
+
+ PE_PRS_SRC_SNK_EVALUATE_PR_SWAP,
+ PE_PRS_SRC_SNK_ACCEPT_PR_SWAP,
+ PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
+ PE_PRS_SRC_SNK_ASSERT_RD,
+ PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+ PE_PRS_SRC_SNK_SEND_SWAP,
+ PE_PRS_SRC_SNK_REJECT_PR_SWAP,
+
+ PE_PRS_SNK_SRC_EVALUATE_PR_SWAP,
+ PE_PRS_SNK_SRC_ACCEPT_PR_SWAP,
+ PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
+ PE_PRS_SNK_SRC_ASSERT_RP,
+ PE_PRS_SNK_SRC_SOURCE_ON,
+ PE_PRS_SNK_SRC_SEND_SWAP,
+ PE_PRS_SNK_SRC_REJECT_SWAP,
+
+ PE_DR_SRC_GET_SOURCE_CAP,
+ PE_DR_SRC_GIVE_SINK_CAP,
+ PE_DR_SNK_GET_SINK_CAP,
+ PE_DR_SNK_GIVE_SOURCE_CAP,
+
+ PE_VCS_SEND_SWAP,
+ PE_VCS_EVALUATE_SWAP,
+ PE_VCS_ACCEPT_SWAP,
+ PE_VCS_REJECT_VCONN_SWAP,
+ PE_VCS_WAIT_FOR_VCONN,
+ PE_VCS_TURN_OFF_VCONN,
+ PE_VCS_TURN_ON_VCONN,
+ PE_VCS_SEND_PS_RDY,
+
+ PE_UFP_VDM_GET_IDENTITY,
+ PE_UFP_VDM_SEND_IDENTITY,
+ PE_UFP_VDM_GET_IDENTITY_NAK,
+
+ PE_UFP_VDM_GET_SVIDS,
+ PE_UFP_VDM_SEND_SVIDS,
+ PE_UFP_VDM_GET_SVIDS_NAK,
+
+ PE_UFP_VDM_GET_MODES,
+ PE_UFP_VDM_SEND_MODES,
+ PE_UFP_VDM_GET_MODES_NAK,
+
+ PE_UFP_VDM_EVALUATE_MODE_ENTRY,
+ PE_UFP_VDM_MODE_ENTRY_ACK,
+ PE_UFP_VDM_MODE_ENTRY_NAK,
+
+ PE_UFP_VDM_MODE_EXIT,
+ PE_UFP_VDM_MODE_EXIT_ACK,
+ PE_UFP_VDM_MODE_EXIT_NAK,
+
+ PE_UFP_VDM_ATTENTION_REQUEST,
+
+ PE_DFP_UFP_VDM_IDENTITY_REQUEST,
+ PE_DFP_UFP_VDM_IDENTITY_ACKED,
+ PE_DFP_UFP_VDM_IDENTITY_NAKED,
+
+ PE_DFP_CBL_VDM_IDENTITY_REQUEST,
+ PE_DFP_CBL_VDM_IDENTITY_ACKED,
+ PE_DFP_CBL_VDM_IDENTITY_NAKED,
+
+ PE_DFP_VDM_SVIDS_REQUEST,
+ PE_DFP_VDM_SVIDS_ACKED,
+ PE_DFP_VDM_SVIDS_NAKED,
+
+ PE_DFP_VDM_MODES_REQUEST,
+ PE_DFP_VDM_MODES_ACKED,
+ PE_DFP_VDM_MODES_NAKED,
+
+ PE_DFP_VDM_MODE_ENTRY_REQUEST,
+ PE_DFP_VDM_MODE_ENTRY_ACKED,
+ PE_DFP_VDM_MODE_ENTRY_NAKED,
+
+ PE_DFP_VDM_MODE_EXIT_REQUEST,
+ PE_DFP_VDM_MODE_EXIT_ACKED,
+
+ PE_DFP_VDM_ATTENTION_REQUEST,
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ PE_DBG_READY,
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+ PE_ERROR_RECOVERY,
+
+ PE_BIST_TEST_DATA,
+ PE_BIST_CARRIER_MODE_2,
+
+ PE_IDLE1, /* Wait tx finished */
+ PE_IDLE2,
+
+ PD_NR_PE_STATES,
+
+ PE_VIRT_HARD_RESET,
+ PE_VIRT_READY,
+};
+
+enum pd_dpm_vdm_request_type {
+ PD_DPM_VDM_REQUEST_DISCOVER_ID =
+ PE_DFP_UFP_VDM_IDENTITY_REQUEST,
+ PD_DPM_VDM_REQUEST_DISCOVER_SVIDS =
+ PE_DFP_VDM_SVIDS_REQUEST,
+ PD_DPM_VDM_REQUEST_DISCOVER_MODES =
+ PE_DFP_VDM_MODES_REQUEST,
+ PD_DPM_VDM_REQUEST_ENTRY_MODE =
+ PE_DFP_VDM_MODE_ENTRY_REQUEST,
+ PD_DPM_VDM_REQUEST_EXIT_MODE =
+ PE_DFP_VDM_MODE_EXIT_REQUEST,
+
+ PD_DPM_VDM_REQUEST_ATTENTION =
+ PE_UFP_VDM_ATTENTION_REQUEST,
+};
+
+/*
+ * Return value of pd_policy_engine_run():
+ * > 0 : event processed
+ * = 0 : no event pending
+ * < 0 : error
+ */
+
+int pd_policy_engine_run(struct tcpc_device *tcpc_dev);
+
+/* ---- Policy Engine (General) ---- */
+
+void pe_power_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (SNK) ---- */
+
+void pe_snk_startup_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_discovery_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_wait_for_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_evaluate_capability_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_select_capability_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_transition_sink_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_hard_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_transition_to_default_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_give_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_get_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_send_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_snk_wait_for_capabilities_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_select_capability_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_transition_sink_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_snk_transition_to_default_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (SRC) ---- */
+
+void pe_src_startup_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_discovery_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_send_capabilities_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_negotiate_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_transition_supply_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_transition_supply2_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_disabled_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_capability_response_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_hard_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_hard_reset_received_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_transition_to_default_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_give_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_get_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_wait_new_capabilities_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_send_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_soft_reset_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_ping_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+#ifdef CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+void pe_src_vdm_identity_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_vdm_identity_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_vdm_identity_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+#endif /* CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID */
+
+void pe_src_send_capabilities_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_transition_supply_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_transition_to_default_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_src_get_sink_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (DRS) ---- */
+
+void pe_drs_dfp_ufp_evaluate_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_dfp_ufp_accept_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_dfp_ufp_change_to_ufp_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_dfp_ufp_send_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_dfp_ufp_reject_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_drs_ufp_dfp_evaluate_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_ufp_dfp_accept_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_ufp_dfp_change_to_dfp_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_ufp_dfp_send_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_drs_ufp_dfp_reject_dr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (PRS) ---- */
+
+void pe_prs_src_snk_evaluate_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_accept_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_transition_to_off_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_assert_rd_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_wait_source_on_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_src_snk_reject_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_prs_snk_src_evaluate_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_accept_pr_swap_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_transition_to_off_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_assert_rp_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_source_on_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_reject_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_prs_src_snk_wait_source_on_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_transition_to_off_exit(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_prs_snk_src_source_on_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (DR) ---- */
+
+void pe_dr_src_get_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dr_src_give_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dr_snk_get_sink_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dr_snk_give_source_cap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dr_src_get_source_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dr_snk_get_sink_cap_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (VCS) ---- */
+
+void pe_vcs_send_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_evaluate_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_accept_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_reject_vconn_swap_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_wait_for_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_turn_off_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_turn_on_vconn_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_vcs_send_ps_rdy_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_vcs_wait_for_vconn_exit(pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (UFP) ---- */
+
+void pe_ufp_vdm_get_identity_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_send_identity_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_get_identity_nak_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_ufp_vdm_get_svids_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_send_svids_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_get_svids_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_ufp_vdm_get_modes_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_send_modes_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_get_modes_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_ufp_vdm_evaluate_mode_entry_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_mode_entry_ack_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_mode_entry_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_ufp_vdm_mode_exit_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_mode_exit_ack_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_ufp_vdm_mode_exit_nak_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_ufp_vdm_attention_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (DFP) ---- */
+
+void pe_dfp_ufp_vdm_identity_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_ufp_vdm_identity_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_ufp_vdm_identity_naked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_cbl_vdm_identity_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_cbl_vdm_identity_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_cbl_vdm_identity_naked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_vdm_svids_request_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_svids_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_svids_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_vdm_modes_request_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_modes_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_modes_naked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_vdm_mode_entry_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_mode_entry_acked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_mode_entry_naked_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_vdm_mode_exit_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+void pe_dfp_vdm_mode_exit_acked_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+
+void pe_dfp_vdm_attention_request_entry(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+/* ---- Policy Engine (DBG) ---- */
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+void pe_dbg_ready_entry(pd_port_t *pd_port, pd_event_t *pd_event);
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+#endif /* PD_POLICY_ENGINE_H_ */
diff --git a/include/linux/hisi/usb/pd/richtek/pd_process_evt.h b/include/linux/hisi/usb/pd/richtek/pd_process_evt.h
new file mode 100644
index 000000000000..f0beb8dd4b68
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/pd_process_evt.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PD_PROCESS_EVT_H_
+#define PD_PROCESS_EVT_H_
+
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+#include <linux/hisi/usb/pd/richtek/pd_policy_engine.h>
+
+typedef struct __pe_state_transition {
+	u8 curr_state; /* state, msg, or cmd */
+ u8 next_state;
+} pe_state_transition_t;
+
+typedef struct __pe_state_reaction {
+ u16 nr_transition;
+ const pe_state_transition_t *state_transition;
+} pe_state_reaction_t;
+
+#define DECL_PE_STATE_TRANSITION(state) \
+ static const pe_state_transition_t state##_state_transition[]
+
+#define DECL_PE_STATE_REACTION(state) \
+ static const pe_state_reaction_t state##_reactions = {\
+ .nr_transition = ARRAY_SIZE(state##_state_transition),\
+ .state_transition = state##_state_transition,\
+ }
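+
+/*
+ * Illustrative sketch (not part of this patch) of how the two macros above
+ * pair up; EXAMPLE_MSG_GET_SINK_CAP / EXAMPLE_MSG_GET_SOURCE_CAP are
+ * hypothetical curr_state values standing in for real message IDs:
+ *
+ *	DECL_PE_STATE_TRANSITION(example_ready) = {
+ *		{ EXAMPLE_MSG_GET_SINK_CAP, PE_SNK_GIVE_SINK_CAP },
+ *		{ EXAMPLE_MSG_GET_SOURCE_CAP, PE_DR_SNK_GIVE_SOURCE_CAP },
+ *	};
+ *	DECL_PE_STATE_REACTION(example_ready);
+ *
+ * The generated example_ready_reactions table is then consumed through
+ * PE_MAKE_STATE_TRANSIT(example_ready) further below.
+ */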
+
+static inline bool pd_check_pe_state_ready(pd_port_t *pd_port)
+{
+	/* TODO: handle port partner first (skip our get_cap state) */
+ switch (pd_port->pe_state_curr) {
+ case PE_SNK_READY:
+ case PE_SRC_READY:
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+ case PE_DBG_READY:
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/*
+ *--------------------------------------------------------------
+ * Sink & Source Common Event
+ *--------------------------------------------------------------
+ */
+
+bool pd_process_data_msg_bist(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+bool pd_process_protocol_error(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+bool pd_process_ctrl_msg_dr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+bool pd_process_dpm_msg_dr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+bool pd_process_ctrl_msg_pr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+bool pd_process_dpm_msg_pr_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+bool pd_process_ctrl_msg_vconn_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+bool pd_process_dpm_msg_vconn_swap(
+ pd_port_t *pd_port, pd_event_t *pd_event);
+
+bool pd_process_recv_hard_reset(
+ pd_port_t *pd_port, pd_event_t *pd_event, uint8_t hreset_state);
+
+/*
+ *------------------------------------------------------------
+ */
+
+#define PE_TRANSIT_STATE(pd_port, state) \
+ ((pd_port)->pe_state_next = state)
+
+#define PE_TRANSIT_POWER_STATE(pd_port, sink, source) \
+ (pd_port->pe_state_next =\
+ ((pd_port->power_role == PD_ROLE_SINK) ? sink : source))
+
+#define PE_TRANSIT_DATA_STATE(pd_port, ufp, dfp) \
+ (pd_port->pe_state_next =\
+ ((pd_port->data_role == PD_ROLE_UFP) ? ufp : dfp))
+
+#define PE_TRANSIT_READY_STATE(pd_port) \
+ PE_TRANSIT_POWER_STATE(pd_port, PE_SNK_READY, PE_SRC_READY)
+
+#define PE_TRANSIT_HARD_RESET_STATE(pd_port) \
+ PE_TRANSIT_POWER_STATE(pd_port, PE_SNK_HARD_RESET, PE_SRC_HARD_RESET)
+
+#define PE_TRANSIT_SOFT_RESET_STATE(pd_port) \
+ PE_TRANSIT_POWER_STATE(pd_port, PE_SNK_SOFT_RESET, PE_SRC_SOFT_RESET)
+
+#define PE_TRANSIT_VCS_SWAP_STATE(pd_port) \
+ PE_TRANSIT_STATE(pd_port, pd_port->vconn_source ? \
+ PE_VCS_WAIT_FOR_VCONN : PE_VCS_TURN_ON_VCONN)
+
+#define PE_TRANSIT_SEND_SOFT_RESET_STATE(pd_port) \
+ PE_TRANSIT_POWER_STATE(pd_port, \
+ PE_SNK_SEND_SOFT_RESET, PE_SRC_SEND_SOFT_RESET)
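+
+/*
+ * Example (illustrative only): a handler that must soft-reset the link,
+ * whatever the current power role, can simply write
+ *
+ *	PE_TRANSIT_SEND_SOFT_RESET_STATE(pd_port);
+ *
+ * which sets pe_state_next to PE_SNK_SEND_SOFT_RESET or
+ * PE_SRC_SEND_SOFT_RESET depending on pd_port->power_role.
+ */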
+
+/*
+ * ---------------------------------------------------------
+ */
+
+#define PE_MAKE_STATE_TRANSIT(state) \
+ pd_make_pe_state_transit(\
+ pd_port, pd_port->pe_state_curr, &state##_reactions)
+/* PE_MAKE_STATE_TRANSIT */
+
+#define PE_MAKE_STATE_TRANSIT_VIRT(state) \
+ pd_make_pe_state_transit_virt(\
+ pd_port, pd_port->pe_state_curr, &state##_reactions)
+/* PE_MAKE_STATE_TRANSIT_VIRT */
+
+#define PE_MAKE_STATE_TRANSIT_FORCE(state, force) \
+ pd_make_pe_state_transit_force(\
+ pd_port, pd_port->pe_state_curr, force, &state##_reactions)
+/* PE_MAKE_STATE_TRANSIT_FORCE */
+
+#define VDM_CMD_STATE_MASK(raw) ((raw) & 0xdf)
+
+#define PE_MAKE_VDM_CMD_STATE_TRANSIT(state) \
+ pd_make_pe_state_transit(\
+ pd_port, \
+ VDM_CMD_STATE_MASK(pd_event->pd_msg->payload[0]), \
+ &state##_reactions)
+/* PE_MAKE_VDM_CMD_STATE_TRANSIT */
+
+#define PE_MAKE_VDM_CMD_STATE_TRANSIT_VIRT(state) \
+ pd_make_pe_state_transit_virt(\
+ pd_port, \
+ VDM_CMD_STATE_MASK(pd_event->pd_msg->payload[0]), \
+ &state##_reactions)
+/* PE_MAKE_VDM_CMD_STATE_TRANSIT_VIRT */
+
+bool pd_make_pe_state_transit(pd_port_t *pd_port, uint8_t curr_state,
+ const pe_state_reaction_t *state_reaction);
+
+bool pd_make_pe_state_transit_virt(pd_port_t *pd_port, uint8_t curr_state,
+ const pe_state_reaction_t *state_reaction);
+
+bool pd_make_pe_state_transit_force(pd_port_t *pd_port,
+		uint8_t curr_state, uint8_t force_state,
+		const pe_state_reaction_t *state_reaction);
+
+bool pd_process_event(pd_port_t *pd_port, pd_event_t *pd_event, bool vdm_evt);
+
+bool pd_process_event_snk(pd_port_t *pd_port, pd_event_t *evt);
+bool pd_process_event_src(pd_port_t *pd_port, pd_event_t *evt);
+bool pd_process_event_drs(pd_port_t *pd_port, pd_event_t *evt);
+bool pd_process_event_prs(pd_port_t *pd_port, pd_event_t *evt);
+bool pd_process_event_vdm(pd_port_t *pd_port, pd_event_t *evt);
+bool pd_process_event_vcs(pd_port_t *pd_port, pd_event_t *evt);
+
+#ifdef CONFIG_USB_PD_CUSTOM_DBGACC
+bool pd_process_event_dbg(pd_port_t *pd_port, pd_event_t *evt);
+#endif /* CONFIG_USB_PD_CUSTOM_DBGACC */
+
+#endif /* PD_PROCESS_EVT_H_ */
diff --git a/include/linux/hisi/usb/pd/richtek/rt-regmap.h b/include/linux/hisi/usb/pd/richtek/rt-regmap.h
new file mode 100644
index 000000000000..257c8be903bb
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/rt-regmap.h
@@ -0,0 +1,296 @@
+/* include/linux/hisi/usb/pd/richtek/rt-regmap.h
+ * Header of the Richtek regmap with debugfs driver
+ *
+ * Copyright (C) 2014 Richtek Technology Corp.
+ * Jeff Chang <jeff_chang@richtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef LINUX_MISC_RT_REGMAP_H
+#define LINUX_MISC_RT_REGMAP_H
+
+#include <linux/debugfs.h>
+
+#define RT_REGMAP_VERSION "1.1.7_G"
+
+enum rt_access_mode {
+ RT_1BYTE_MODE = 1,
+ RT_2BYTE_MODE = 2,
+ RT_4BYTE_MODE = 4,
+};
+
+/* start : the start address of group
+ * end : the end address of group
+ * mode : access mode (1,2,4 bytes)
+ */
+struct rt_access_group {
+ u32 start;
+ u32 end;
+ enum rt_access_mode mode;
+};
+
+/* rt_reg_type
+ * RT_NORMAL   : write data without mask, read from cache
+ * RT_WBITS    : write data with mask, read from cache
+ * RT_VOLATILE : write data to the chip directly, read from the chip
+ * RT_RESERVE  : reserved registers (write/read as RT_NORMAL)
+ */
+
+#define RT_REG_TYPE_MASK (0x03)
+#define RT_NORMAL (0x00)
+#define RT_WBITS (0x01)
+#define RT_VOLATILE (0x02)
+#define RT_RESERVE (0x03)
+
+/* RT_WR_ONCE : a write first compares the new data with the cached data;
+ * if they are identical, the register is not written again.
+ */
+#define RT_WR_ONCE (0x08)
+#define RT_NORMAL_WR_ONCE (RT_NORMAL | RT_WR_ONCE)
+#define RT_WBITS_WR_ONCE (RT_WBITS | RT_WR_ONCE)
+
+enum rt_data_format {
+ RT_LITTLE_ENDIAN,
+ RT_BIG_ENDIAN,
+};
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/* rt_regmap_mode bit layout
+ * bit 0    : byte mode      (RT_BYTE_MODE_MASK)
+ * bits 1-2 : cache mode     (RT_CACHE_MODE_MASK)
+ * bits 3-4 : I/O block mode (RT_IO_BLK_MODE_MASK)
+ * bit 5    : debug mode     (DBG_MODE_MASK)
+ */
+
+#define RT_BYTE_MODE_MASK (0x01)
+/* 1 byte for each register*/
+#define RT_SINGLE_BYTE (0 << 0)
+/* multiple bytes for each register */
+#define RT_MULTI_BYTE BIT(0)
+
+#define RT_CACHE_MODE_MASK (0x06)
+/* write to cache and chip synchronously */
+#define RT_CACHE_WR_THROUGH (0 << 1)
+/* write to cache and chip asynchronously */
+#define RT_CACHE_WR_BACK BIT(1)
+/* disable cache */
+#define RT_CACHE_DISABLE (2 << 1)
+
+#define RT_IO_BLK_MODE_MASK (0x18)
+/* pass through all write function */
+#define RT_IO_PASS_THROUGH (0 << 3)
+/* block all write function */
+#define RT_IO_BLK_ALL BIT(3)
+/* block cache write function */
+#define RT_IO_BLK_CACHE (2 << 3)
+/* block chip write function */
+#define RT_IO_BLK_CHIP (3 << 3)
+
+#define DBG_MODE_MASK (0x20)
+/* create general debugfs for register map */
+#define RT_DBG_GENERAL (0 << 5)
+/* create a debugfs node for each register, by register address */
+#define RT_DBG_SPECIAL BIT(5)
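+
+/*
+ * e.g. a mode byte of (RT_MULTI_BYTE | RT_CACHE_DISABLE | RT_IO_PASS_THROUGH
+ * | RT_DBG_GENERAL) = 0x05 describes a multi-byte register map with the
+ * cache disabled, all writes passed through and one general debugfs node.
+ */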
+
+/* struct rt_register
+ *
+ * Richtek register map structure for storing mapping data.
+ * @addr: register address.
+ * @name: register name.
+ * @size: register byte size.
+ * @reg_type: register R/W type ( RT_NORMAL, RT_WBITS, RT_VOLATILE, RT_RESERVE)
+ * @wbit_mask: register writable bits mask.
+ * @cache_data: cache data for store cache value.
+ */
+struct rt_register {
+ u32 addr;
+ const char *name;
+ unsigned int size;
+ unsigned char reg_type;
+ unsigned char *wbit_mask;
+ unsigned char *cache_data;
+};
+
+/* Declare a rt_register by RT_REG_DECL
+ * @_addr: register address.
+ * @_reg_length: register data length.
+ * @_reg_type: register type (rt_reg_type).
+ * @_mask_: register writable mask.
+ */
+#define RT_REG_DECL(_addr, _reg_length, _reg_type, _mask_...) \
+ static unsigned char rt_writable_mask_##_addr[_reg_length] = _mask_;\
+ static struct rt_register rt_register_##_addr = { \
+ .addr = _addr, \
+ .size = _reg_length,\
+ .reg_type = _reg_type,\
+ .wbit_mask = rt_writable_mask_##_addr,\
+ }
+
+/* Declare a rt_register by RT_NAMED_REG_DECL
+ * @_name: a name for a rt_register.
+ */
+#define RT_NAMED_REG_DECL(_addr, _name, _reg_length, _reg_type, _mask_...) \
+ static unsigned char rt_writable_mask_##_addr[_reg_length] = _mask_;\
+ static struct rt_register rt_register_##_addr = { \
+ .addr = _addr, \
+ .name = _name, \
+ .size = _reg_length,\
+ .reg_type = _reg_type,\
+ .wbit_mask = rt_writable_mask_##_addr,\
+ }
+
+typedef struct rt_register *rt_register_map_t;
+
+#define RT_REG(_addr) (&rt_register_##_addr)
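+
+/*
+ * Illustrative sketch (addresses, lengths and masks are hypothetical):
+ *
+ *	RT_REG_DECL(0x10, 1, RT_WBITS_WR_ONCE, {0xFF});
+ *	RT_NAMED_REG_DECL(0x1D, "CC_STATUS", 1, RT_VOLATILE, {});
+ *
+ *	static const rt_register_map_t example_regmap[] = {
+ *		RT_REG(0x10),
+ *		RT_REG(0x1D),
+ *	};
+ *
+ * example_regmap and ARRAY_SIZE(example_regmap) would then feed the .rm and
+ * .register_num fields of struct rt_regmap_properties.
+ */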
+
+/* rt_regmap_properties
+ * @name: the name of the debugfs node.
+ * @aliases: alias name of the rt_regmap_device.
+ * @register_num: the number of rt_register_map registers.
+ * @rm: rt_register_map pointer array.
+ * @group: register map access group.
+ * @rt_format: data format, little endian by default.
+ * @rt_regmap_mode: rt_regmap_device mode.
+ * @io_log_en: enable/disable the I/O log.
+ */
+struct rt_regmap_properties {
+ const char *name;
+ const char *aliases;
+ int register_num;
+ const rt_register_map_t *rm;
+ struct rt_access_group *group;
+ enum rt_data_format rt_format;
+ unsigned char rt_regmap_mode;
+ unsigned char io_log_en:1;
+};
+
+/* Argument struct passed to the rt_regmap_reg_read/rt_regmap_reg_write
+ * helpers.
+ * @reg: register address.
+ * @mask: mask used by rt_regmap_update_bits().
+ * @rt_data: register value.
+ */
+struct rt_reg_data {
+ u32 reg;
+ u32 mask;
+ union {
+ u32 data_u32;
+ u16 data_u16;
+ u8 data_u8;
+ u8 data[4];
+ } rt_data;
+};
+
+struct rt_regmap_device;
+
+struct rt_debug_st {
+ void *info;
+ int id;
+};
+
+/* basic chip read/write function */
+struct rt_regmap_fops {
+ int (*read_device)(void *client, u32 addr, int leng, void *dst);
+ int (*write_device)(void *client, u32 addr, int leng, const void *src);
+};
+
+struct rt_regmap_device*
+ rt_regmap_device_register(struct rt_regmap_properties *props,
+ struct rt_regmap_fops *rops,
+ struct device *parent,
+ void *client, void *drvdata);
+
+void rt_regmap_device_unregister(struct rt_regmap_device *rd);
+
+int rt_regmap_cache_init(struct rt_regmap_device *rd);
+
+int rt_regmap_cache_reload(struct rt_regmap_device *rd);
+
+int rt_regmap_block_write(
+ struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *rc);
+int rt_asyn_regmap_block_write(
+ struct rt_regmap_device *rd, u32 reg,
+ int bytes, const void *rc);
+int rt_regmap_block_read(
+ struct rt_regmap_device *rd, u32 reg,
+ int bytes, void *dst);
+
+int _rt_regmap_reg_read(
+ struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd);
+int _rt_regmap_reg_write(
+ struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd);
+int _rt_asyn_regmap_reg_write(
+ struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd);
+int _rt_regmap_update_bits(
+ struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd);
+
+static inline int rt_regmap_reg_read(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd, u32 reg)
+{
+ rrd->reg = reg;
+ return _rt_regmap_reg_read(rd, rrd);
+}
+
+static inline int rt_regmap_reg_write(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd,
+ u32 reg, const u32 data)
+{
+ rrd->reg = reg;
+ rrd->rt_data.data_u32 = data;
+ return _rt_regmap_reg_write(rd, rrd);
+}
+
+static inline int rt_asyn_regmap_reg_write(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd,
+ u32 reg, const u32 data)
+{
+ rrd->reg = reg;
+ rrd->rt_data.data_u32 = data;
+ return _rt_asyn_regmap_reg_write(rd, rrd);
+}
+
+static inline int rt_regmap_update_bits(struct rt_regmap_device *rd,
+ struct rt_reg_data *rrd,
+ u32 reg, u32 mask, u32 data)
+{
+ rrd->reg = reg;
+ rrd->mask = mask;
+ rrd->rt_data.data_u32 = data;
+ return _rt_regmap_update_bits(rd, rrd);
+}
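+
+/*
+ * Illustrative use (the register address 0x1A and mask are hypothetical):
+ *
+ *	struct rt_reg_data rrd;
+ *
+ *	rt_regmap_reg_read(rd, &rrd, 0x1A);
+ *	rt_regmap_update_bits(rd, &rrd, 0x1A, 0x40, 0x40);
+ *
+ * Each wrapper fills in the relevant rrd fields (reg, mask, data) before
+ * handing it to the corresponding _rt_regmap_* call.
+ */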
+
+void rt_regmap_cache_backup(struct rt_regmap_device *rd);
+
+void rt_regmap_cache_sync(struct rt_regmap_device *rd);
+void rt_regmap_cache_write_back(struct rt_regmap_device *rd, u32 reg);
+
+int rt_is_reg_readable(struct rt_regmap_device *rd, u32 reg);
+int rt_is_reg_volatile(struct rt_regmap_device *rd, u32 reg);
+int rt_get_regsize(struct rt_regmap_device *rd, u32 reg);
+void rt_cache_getlasterror(struct rt_regmap_device *rd, char *buf);
+void rt_cache_clrlasterror(struct rt_regmap_device *rd);
+
+int rt_regmap_add_debugfs(
+ struct rt_regmap_device *rd, const char *name,
+ umode_t mode, void *data,
+ const struct file_operations *fops);
+
+#define to_rt_regmap_device(obj) container_of(obj, struct rt_regmap_device, dev)
+
+#endif /* LINUX_MISC_RT_REGMAP_H */
diff --git a/include/linux/hisi/usb/pd/richtek/rt1711h.h b/include/linux/hisi/usb/pd/richtek/rt1711h.h
new file mode 100644
index 000000000000..78ba3c8f4ea7
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/rt1711h.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_RT1711H_H
+#define __LINUX_RT1711H_H
+
+#include <linux/hisi/usb/pd/richtek/std_tcpci_v10.h>
+/* show debug messages or not */
+#define ENABLE_RT1711_DBG 0
+
+/* RT1711H Private RegMap */
+
+#define RT1711H_REG_CLK_CTRL2 (0x87)
+#define RT1711H_REG_CLK_CTRL3 (0x88)
+
+#define RT1711H_REG_BMC_CTRL (0x90)
+#define RT1711H_REG_BMCIO_RXDZSEL (0x93)
+#define RT1711H_REG_VCONN_CLIMITEN (0x95)
+
+#define RT1711H_REG_RT_STATUS (0x97)
+#define RT1711H_REG_RT_INT (0x98)
+#define RT1711H_REG_RT_MASK (0x99)
+
+#define RT1711H_REG_IDLE_CTRL (0x9B)
+#define RT1711H_REG_INTRST_CTRL (0x9C)
+#define RT1711H_REG_WATCHDOG_CTRL (0x9D)
+#define RT1711H_REG_I2CRST_CTRL		(0x9E)
+
+#define RT1711H_REG_SWRESET (0xA0)
+#define RT1711H_REG_TTCPC_FILTER (0xA1)
+#define RT1711H_REG_DRP_TOGGLE_CYCLE (0xA2)
+#define RT1711H_REG_DRP_DUTY_CTRL (0xA3)
+
+/*
+ * Device ID
+ */
+
+#define RT1711H_DID_A 0x2170
+#define RT1711H_DID_B 0x2171
+#define RT1711H_DID_C 0x2172
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/*
+ * RT1711H_REG_CLK_CTRL2 (0x87)
+ */
+
+#define RT1711H_REG_CLK_DIV_600K_EN BIT(7)
+#define RT1711H_REG_CLK_BCLK2_EN BIT(6)
+#define RT1711H_REG_CLK_BCLK2_TG_EN BIT(5)
+#define RT1711H_REG_CLK_DIV_300K_EN BIT(3)
+#define RT1711H_REG_CLK_CK_300K_EN BIT(2)
+#define RT1711H_REG_CLK_BCLK_EN BIT(1)
+#define RT1711H_REG_CLK_BCLK_TH_EN BIT(0)
+
+/*
+ * RT1711H_REG_CLK_CTRL3 (0x88)
+ */
+
+#define RT1711H_REG_CLK_OSCMUX_RG_EN BIT(7)
+#define RT1711H_REG_CLK_CK_24M_EN BIT(6)
+#define RT1711H_REG_CLK_OSC_RG_EN BIT(5)
+#define RT1711H_REG_CLK_DIV_2P4M_EN BIT(4)
+#define RT1711H_REG_CLK_CK_2P4M_EN BIT(3)
+#define RT1711H_REG_CLK_PCLK_EN BIT(2)
+#define RT1711H_REG_CLK_PCLK_RG_EN BIT(1)
+#define RT1711H_REG_CLK_PCLK_TG_EN BIT(0)
+
+/*
+ * RT1711H_REG_BMC_CTRL (0x90)
+ */
+
+#define RT1711H_REG_IDLE_EN BIT(6)
+#define RT1711H_REG_DISCHARGE_EN BIT(5)
+#define RT1711H_REG_BMCIO_LPRPRD BIT(4)
+#define RT1711H_REG_BMCIO_LPEN BIT(3)
+#define RT1711H_REG_BMCIO_BG_EN BIT(2)
+#define RT1711H_REG_VBUS_DET_EN BIT(1)
+#define RT1711H_REG_BMCIO_OSC_EN BIT(0)
+
+/*
+ * RT1711H_REG_RT_STATUS (0x97)
+ */
+
+#define RT1711H_REG_RA_DETACH BIT(5)
+#define RT1711H_REG_VBUS_80 BIT(1)
+
+/*
+ * RT1711H_REG_RT_INT (0x98)
+ */
+
+#define RT1711H_REG_INT_RA_DETACH BIT(5)
+#define RT1711H_REG_INT_WATCHDOG BIT(2)
+#define RT1711H_REG_INT_VBUS_80 BIT(1)
+#define RT1711H_REG_INT_WAKEUP BIT(0)
+
+/*
+ * RT1711H_REG_RT_MASK (0x99)
+ */
+
+#define RT1711H_REG_M_RA_DETACH BIT(5)
+#define RT1711H_REG_M_WATCHDOG BIT(2)
+#define RT1711H_REG_M_VBUS_80 BIT(1)
+#define RT1711H_REG_M_WAKEUP BIT(0)
+
+/*
+ * RT1711H_REG_IDLE_CTRL (0x9B)
+ */
+
+#define RT1711H_REG_CK_300K_SEL BIT(7)
+#define RT1711H_REG_SHIPPING_OFF BIT(5)
+#define RT1711H_REG_AUTOIDLE_EN BIT(3)
+
+/* timeout = (tout*2+1) * 6.4ms */
+#define RT1711H_REG_IDLE_SET(ck300, ship_dis, auto_idle, tout) \
+ (((ck300) << 7) | ((ship_dis) << 5) | \
+ ((auto_idle) << 3) | ((tout) & 0x07))
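+
+/*
+ * e.g. RT1711H_REG_IDLE_SET(1, 1, 1, 2) sets CK_300K_SEL, SHIPPING_OFF and
+ * AUTOIDLE_EN with tout = 2, i.e. an auto-idle timeout of
+ * (2 * 2 + 1) * 6.4ms = 32ms.
+ */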
+
+/*
+ * RT1711H_REG_INTRST_CTRL (0x9C)
+ */
+
+#define RT1711H_REG_INTRST_EN BIT(7)
+
+/* timeout = (tout+1) * 0.2sec */
+#define RT1711H_REG_INTRST_SET(en, tout) \
+ (((en) << 7) | ((tout) & 0x03))
+
+/*
+ * RT1711H_REG_WATCHDOG_CTRL (0x9D)
+ */
+
+#define RT1711H_REG_WATCHDOG_EN BIT(7)
+
+/* timeout = (tout+1) * 0.4sec */
+#define RT1711H_REG_WATCHDOG_CTRL_SET(en, tout) \
+ (((en) << 7) | ((tout) & 0x07))
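+
+/*
+ * e.g. RT1711H_REG_WATCHDOG_CTRL_SET(1, 3) enables the watchdog with
+ * tout = 3, i.e. a timeout of (3 + 1) * 0.4s = 1.6s.
+ */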
+
+#if ENABLE_RT1711_DBG
+#define RT1711H_INFO hisilog_err
+#else
+#define RT1711H_INFO(format, args...)
+#endif
+
+#endif /* #ifndef __LINUX_RT1711H_H */
diff --git a/include/linux/hisi/usb/pd/richtek/std_tcpci_v10.h b/include/linux/hisi/usb/pd/richtek/std_tcpci_v10.h
new file mode 100644
index 000000000000..a50c66e8f67e
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/std_tcpci_v10.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef STD_TCPCI_V10_H_
+#define STD_TCPCI_V10_H_
+
+/* Standard TCPC V10 RegMap */
+
+#define TCPC_V10_REG_VID (0x00)
+#define TCPC_V10_REG_PID (0x02)
+#define TCPC_V10_REG_DID (0x04)
+#define TCPC_V10_REG_TYPEC_REV (0x06)
+#define TCPC_V10_REG_PD_REV (0x08)
+#define TCPC_V10_REG_PDIF_REV (0x0A)
+
+#define TCPC_V10_REG_ALERT (0x10)
+#define TCPC_V10_REG_ALERT_MASK (0x12)
+#define TCPC_V10_REG_POWER_STATUS_MASK (0x14)
+#define TCPC_V10_REG_FAULT_STATUS_MASK (0x15)
+
+#define TCPC_V10_REG_TCPC_CTRL (0x19)
+#define TCPC_V10_REG_ROLE_CTRL (0x1A)
+#define TCPC_V10_REG_FAULT_CTRL (0x1B)
+#define TCPC_V10_REG_POWER_CTRL (0x1C)
+
+#define TCPC_V10_REG_CC_STATUS (0x1D)
+#define TCPC_V10_REG_POWER_STATUS (0x1E)
+#define TCPC_V10_REG_FAULT_STATUS (0x1F)
+
+#define TCPC_V10_REG_COMMAND (0x23)
+
+#define TCPC_V10_REG_MSG_HDR_INFO (0x2e)
+
+#define TCPC_V10_REG_RX_DETECT (0x2f)
+
+#define TCPC_V10_REG_RX_BYTE_CNT (0x30)
+#define TCPC_V10_REG_RX_BUF_FRAME_TYPE (0x31)
+#define TCPC_V10_REG_RX_HDR (0x32)
+#define TCPC_V10_REG_RX_DATA (0x34)
+
+#define TCPC_V10_REG_TRANSMIT (0x50)
+#define TCPC_V10_REG_TX_BYTE_CNT (0x51)
+#define TCPC_V10_REG_TX_HDR (0x52)
+#define TCPC_V10_REG_TX_DATA				(0x54)	/* through 0x6f */
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/*
+ * TCPC_V10_REG_ALERT (0x10)
+ * TCPC_V10_REG_ALERT_MASK (0x12)
+ */
+#define TCPC_V10_REG_VBUS_SINK_DISCONNECT BIT(11)
+#define TCPC_V10_REG_RX_OVERFLOW BIT(10)
+#define TCPC_V10_REG_ALERT_FAULT BIT(9)
+#define TCPC_V10_REG_ALERT_LO_VOLT BIT(8)
+#define TCPC_V10_REG_ALERT_HI_VOLT BIT(7)
+#define TCPC_V10_REG_ALERT_TX_SUCCESS BIT(6)
+#define TCPC_V10_REG_ALERT_TX_DISCARDED BIT(5)
+#define TCPC_V10_REG_ALERT_TX_FAILED BIT(4)
+#define TCPC_V10_REG_ALERT_RX_HARD_RST BIT(3)
+#define TCPC_V10_REG_ALERT_RX_STATUS BIT(2)
+#define TCPC_V10_REG_ALERT_POWER_STATUS BIT(1)
+#define TCPC_V10_REG_ALERT_CC_STATUS BIT(0)
+
+/*
+ * TCPC_V10_REG_POWER_STATUS_MASK (0x14)
+ * TCPC_V10_REG_POWER_STATUS (0x1E)
+ */
+
+#define TCPC_V10_REG_POWER_STATUS_TCPC_INITIAL BIT(6)
+#define TCPC_V10_REG_POWER_STATUS_SRC_HV BIT(5)
+#define TCPC_V10_REG_POWER_STATUS_SRC_VBUS BIT(4)
+#define TCPC_V10_REG_POWER_STATUS_VBUS_PRES_DET BIT(3)
+#define TCPC_V10_REG_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_V10_REG_POWER_STATUS_VCONN_PRES BIT(1)
+#define TCPC_V10_REG_POWER_STATUS_SINK_VBUS BIT(0)
+
+/*
+ * TCPC_V10_REG_FAULT_STATUS_MASK (0x15)
+ * TCPC_V10_REG_FAULT_STATUS (0x1F)
+ */
+
+#define TCPC_V10_REG_FAULT_STATUS_VCONN_OV BIT(7)
+#define TCPC_V10_REG_FAULT_STATUS_FORCE_OFF_VBUS BIT(6)
+#define TCPC_V10_REG_FAULT_STATUS_AUTO_DISC_FAIL BIT(5)
+#define TCPC_V10_REG_FAULT_STATUS_FORCE_DISC_FAIL BIT(4)
+#define TCPC_V10_REG_FAULT_STATUS_VBUS_OC BIT(3)
+#define TCPC_V10_REG_FAULT_STATUS_VBUS_OV BIT(2)
+#define TCPC_V10_REG_FAULT_STATUS_VCONN_OC BIT(1)
+#define TCPC_V10_REG_FAULT_STATUS_I2C_ERROR BIT(0)
+
+/*
+ * TCPC_V10_REG_ROLE_CTRL (0x1A)
+ */
+
+#define TCPC_V10_REG_ROLE_CTRL_DRP BIT(6)
+
+#define TCPC_V10_REG_ROLE_CTRL_RES_SET(drp, rp, cc1, cc2) \
+ ((drp) << 6 | (rp) << 4 | (cc2) << 2 | (cc1))
+
+#define CC_RD 0x02
+#define CC_RP 0x01
+#define CC_OPEN 0x03
+#define CC_RA 0x00
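+
+/*
+ * e.g. TCPC_V10_REG_ROLE_CTRL_RES_SET(1, 0, CC_RD, CC_RD) builds a DRP
+ * role-control value with the default Rp level and Rd presented on both CC
+ * pins: (1 << 6) | (0 << 4) | (CC_RD << 2) | CC_RD = 0x4A.
+ */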
+
+/*
+ * TCPC_V10_REG_TCPC_CTRL (0x19)
+ */
+
+#define TCPC_V10_REG_TCPC_CTRL_BIST_TEST_MODE BIT(1)
+#define TCPC_V10_REG_TCPC_CTRL_PLUG_ORIENT BIT(0)
+
+/*
+ * TCPC_V10_REG_FAULT_CTRL (0x1B)
+ */
+
+#define TCPC_V10_REG_FAULT_CTRL_DIS_VCONN_OV BIT(7)
+#define TCPC_V10_REG_FAULT_CTRL_DIS_SNK_VBUS_OC BIT(2)
+#define TCPC_V10_REG_FAULT_CTRL_DIS_VCONN_OC BIT(0)
+
+/*
+ * TCPC_V10_REG_POWER_CTRL (0x1C)
+ */
+
+#define TCPC_V10_REG_POWER_CTRL_VCONN BIT(0)
+
+/*
+ * TCPC_V10_REG_CC_STATUS (0x1D)
+ */
+
+#define TCPC_V10_REG_CC_STATUS_DRP_TOGGLING BIT(5)
+#define TCPC_V10_REG_CC_STATUS_DRP_RESULT(reg) (((reg) & 0x10) >> 4)
+#define TCPC_V10_REG_CC_STATUS_CC2(reg) (((reg) & 0xc) >> 2)
+#define TCPC_V10_REG_CC_STATUS_CC1(reg) ((reg) & 0x3)
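+
+/*
+ * e.g. a CC_STATUS value of 0x15 decodes as: DRP_TOGGLING clear,
+ * DRP_RESULT(0x15) = 1, CC2(0x15) = 1, CC1(0x15) = 1.
+ */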
+
+/*
+ * TCPC_V10_REG_COMMAND (0x23)
+ */
+
+enum tcpm_v10_command {
+ TCPM_CMD_WAKE_I2C = 0x11,
+ TCPM_CMD_DISABLE_VBUS_DETECT = 0x22,
+ TCPM_CMD_ENABLE_VBUS_DETECT = 0x33,
+ TCPM_CMD_DISABLE_SINK_VBUS = 0x44,
+ TCPM_CMD_ENABLE_SINK_VBUS = 0x55,
+ TCPM_CMD_DISABLE_SOURCE_VBUS = 0x66,
+ TCPM_CMD_ENABLE_SOURCE_VBUS = 0x77,
+ TCPM_CMD_SOURCE_VBUS_HV = 0x88,
+ TCPM_CMD_LOOK_CONNECTION = 0x99,
+ TCPM_CMD_RX_ONE_MODE = 0xAA,
+ TCPM_CMD_I2C_IDLE = 0xFF,
+};
+
+/*
+ * TCPC_V10_REG_MSG_HDR_INFO (0x2e)
+ */
+
+#define TCPC_V10_REG_MSG_HDR_INFO_SET(drole, prole) \
+ ((drole) << 3 | (PD_REV20 << 1) | (prole))
+#define TCPC_V10_REG_MSG_HDR_INFO_DROLE(reg) (((reg) & 0x8) >> 3)
+#define TCPC_V10_REG_MSG_HDR_INFO_PROLE(reg) ((reg) & 0x1)
+
+/*
+ * TCPC_V10_REG_TRANSMIT (0x50)
+ */
+
+#define TCPC_V10_REG_TRANSMIT_SET(type) \
+ (PD_RETRY_COUNT << 4 | (type))
+
+#endif /* STD_TCPCI_V10_H_ */
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci.h b/include/linux/hisi/usb/pd/richtek/tcpci.h
new file mode 100644
index 000000000000..1e692f47f937
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_RT_TCPC_H
+#define __LINUX_RT_TCPC_H
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+
+#include <linux/sched.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_core.h>
+#ifdef CONFIG_USB_POWER_DELIVERY
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#define PE_STATE_FULL_NAME 0
+
+/* provide to TCPC interface */
+int tcpci_report_usb_port_changed(struct tcpc_device *tcpc);
+int tcpc_typec_init(struct tcpc_device *tcpc, u8 typec_role);
+void tcpc_typec_deinit(struct tcpc_device *tcpc);
+int tcpc_dual_role_phy_init(struct tcpc_device *tcpc);
+
+struct tcpc_device *tcpc_device_register(
+ struct device *parent, struct tcpc_desc *tcpc_desc,
+ struct tcpc_ops *ops, void *drv_data);
+void tcpc_device_unregister(
+ struct device *dev, struct tcpc_device *tcpc);
+
+int tcpc_schedule_init_work(struct tcpc_device *tcpc);
+
+void *tcpc_get_dev_data(struct tcpc_device *tcpc);
+void tcpci_lock_typec(struct tcpc_device *tcpc);
+void tcpci_unlock_typec(struct tcpc_device *tcpc);
+int tcpci_alert(struct tcpc_device *tcpc);
+
+void tcpci_vbus_level_init(
+ struct tcpc_device *tcpc, u16 power_status);
+
+static inline int tcpci_check_vbus_valid(struct tcpc_device *tcpc)
+{
+ return tcpc->vbus_level >= TCPC_VBUS_VALID;
+}
+
+static inline int tcpci_check_vsafe0v(struct tcpc_device *tcpc, bool detect_en)
+{
+ int ret = 0;
+
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT_IC
+ ret = (tcpc->vbus_level == TCPC_VBUS_SAFE0V);
+#else
+ ret = (tcpc->vbus_level == TCPC_VBUS_INVALID);
+#endif
+
+ return ret;
+}
+
+static inline int tcpci_alert_status_clear(
+ struct tcpc_device *tcpc, u32 mask)
+{
+ return tcpc->ops->alert_status_clear(tcpc, mask);
+}
+
+static inline int tcpci_fault_status_clear(
+ struct tcpc_device *tcpc, u8 status)
+{
+ if (tcpc->ops->fault_status_clear)
+ return tcpc->ops->fault_status_clear(tcpc, status);
+ return 0;
+}
+
+static inline int tcpci_get_alert_status(
+ struct tcpc_device *tcpc, u32 *alert)
+{
+ return tcpc->ops->get_alert_status(tcpc, alert);
+}
+
+static inline int tcpci_get_fault_status(
+ struct tcpc_device *tcpc, u8 *fault)
+{
+ if (tcpc->ops->get_fault_status)
+ return tcpc->ops->get_fault_status(tcpc, fault);
+ *fault = 0;
+ return 0;
+}
+
+static inline int tcpci_get_power_status(
+ struct tcpc_device *tcpc, u16 *pw_status)
+{
+ return tcpc->ops->get_power_status(tcpc, pw_status);
+}
+
+static inline int tcpci_init(struct tcpc_device *tcpc, bool sw_reset)
+{
+ int ret;
+ u16 power_status;
+
+ ret = tcpc->ops->init(tcpc, sw_reset);
+ if (ret)
+ return ret;
+
+ ret = tcpci_get_power_status(tcpc, &power_status);
+ if (ret)
+ return ret;
+
+ tcpci_vbus_level_init(tcpc, power_status);
+ return 0;
+}
+
+static inline int tcpci_get_cc(struct tcpc_device *tcpc)
+{
+ int ret, cc1, cc2;
+
+ ret = tcpc->ops->get_cc(tcpc, &cc1, &cc2);
+ if (ret < 0)
+ return ret;
+
+ if ((cc1 == tcpc->typec_remote_cc[0]) &&
+ (cc2 == tcpc->typec_remote_cc[1])) {
+ return 0;
+ }
+
+ tcpc->typec_remote_cc[0] = cc1;
+ tcpc->typec_remote_cc[1] = cc2;
+ return 1;
+}
+
+static inline int tcpci_set_cc(struct tcpc_device *tcpc, int pull)
+{
+#ifdef CONFIG_USB_PD_DBG_ALWAYS_LOCAL_RP
+ if (pull == TYPEC_CC_RP)
+ pull = tcpc->typec_local_rp_level;
+#endif /* CONFIG_USB_PD_DBG_ALWAYS_LOCAL_RP */
+
+ if (pull & TYPEC_CC_DRP) {
+ tcpc->typec_remote_cc[0] =
+ tcpc->typec_remote_cc[1] =
+ TYPEC_CC_DRP_TOGGLING;
+ }
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ if ((pull == TYPEC_CC_DRP) && (tcpc->typec_legacy_cable)) {
+ TCPC_INFO("LegacyCable-->\r\n");
+ pull = TYPEC_CC_RP_1_5;
+ }
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+ tcpc->typec_local_cc = pull;
+ return tcpc->ops->set_cc(tcpc, pull);
+}
+
+static inline int tcpci_set_polarity(struct tcpc_device *tcpc, int polarity)
+{
+ return tcpc->ops->set_polarity(tcpc, polarity);
+}
+
+static inline int tcpci_set_vconn(struct tcpc_device *tcpc, int enable)
+{
+ struct tcp_notify tcp_noti;
+
+ tcp_noti.en_state.en = enable != 0;
+ srcu_notifier_call_chain(&tcpc->evt_nh,
+ TCP_NOTIFY_SOURCE_VCONN, &tcp_noti);
+
+ return tcpc->ops->set_vconn(tcpc, enable);
+}
+
+static inline int tcpci_set_low_power_mode(
+ struct tcpc_device *tcpc, bool en, int pull)
+{
+ int rv = 0;
+
+#ifdef CONFIG_TCPC_LOW_POWER_MODE
+ rv = tcpc->ops->set_low_power_mode(tcpc, en, pull);
+#endif
+ return rv;
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+static inline int tcpci_set_msg_header(
+ struct tcpc_device *tcpc, int power_role, int data_role)
+{
+ return tcpc->ops->set_msg_header(tcpc, power_role, data_role);
+}
+
+static inline int tcpci_set_rx_enable(struct tcpc_device *tcpc, u8 enable)
+{
+ return tcpc->ops->set_rx_enable(tcpc, enable);
+}
+
+static inline int tcpci_get_message(struct tcpc_device *tcpc,
+ u32 *payload, u16 *head,
+ enum tcpm_transmit_type *type)
+{
+ return tcpc->ops->get_message(tcpc, payload, head, type);
+}
+
+static inline int tcpci_transmit(struct tcpc_device *tcpc,
+ enum tcpm_transmit_type type,
+ u16 header, const u32 *data)
+{
+ return tcpc->ops->transmit(tcpc, type, header, data);
+}
+
+static inline int tcpci_set_bist_test_mode(struct tcpc_device *tcpc, bool en)
+{
+ return tcpc->ops->set_bist_test_mode(tcpc, en);
+}
+
+static inline int tcpci_set_bist_carrier_mode(
+ struct tcpc_device *tcpc, u8 pattern)
+{
+ if (pattern) /* wait for GoodCRC */
+ usleep_range(240, 260);
+
+ return tcpc->ops->set_bist_carrier_mode(tcpc, pattern);
+}
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+static inline int tcpci_retransmit(struct tcpc_device *tcpc)
+{
+ return tcpc->ops->retransmit(tcpc);
+}
+#endif /* CONFIG_USB_PD_RETRY_CRC_DISCARD */
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+static inline int tcpci_notify_typec_state(
+ struct tcpc_device *tcpc)
+{
+ struct pd_dpm_typec_state typec_state;
+
+ typec_state.polarity = tcpc->typec_polarity;
+ typec_state.old_state = tcpc->typec_attach_old;
+ typec_state.new_state = tcpc->typec_attach_new;
+
+ pd_dpm_handle_pe_event(PD_DPM_PE_EVT_TYPEC_STATE, &typec_state);
+ return 0;
+}
+
+static inline int tcpci_notify_role_swap(
+ struct tcpc_device *tcpc, u8 event, u8 role)
+{
+	u8 dpm_event;
+	struct pd_dpm_swap_state swap_state;
+
+	switch (event) {
+	case TCP_NOTIFY_DR_SWAP:
+		dpm_event = PD_DPM_PE_EVT_DR_SWAP;
+		break;
+	case TCP_NOTIFY_PR_SWAP:
+		dpm_event = PD_DPM_PE_EVT_PR_SWAP;
+		break;
+	case TCP_NOTIFY_VCONN_SWAP:
+		dpm_event = PD_DPM_PE_EVT_VCONN_SWAP;
+		break;
+	default:
+		return 0;
+	}
+
+	swap_state.new_role = role;
+	return pd_dpm_handle_pe_event(dpm_event, &swap_state);
+}
+
+static inline int tcpci_notify_pd_state(
+ struct tcpc_device *tcpc, u8 connect)
+{
+ struct pd_dpm_pd_state pd_state;
+
+ pd_state.connected = connect;
+ return pd_dpm_handle_pe_event(
+ PD_DPM_PE_EVT_PD_STATE, &pd_state);
+}
+
+static inline int tcpci_disable_vbus_control(struct tcpc_device *tcpc)
+{
+ hisilog_err("%s: !!!++++++++\n", __func__);
+#ifdef CONFIG_TYPEC_USE_DIS_VBUS_CTRL
+	TCPC_DBG("disable_vbus\r\n");
+	pd_dpm_handle_pe_event(PD_DPM_PE_EVT_DIS_VBUS_CTRL, NULL);
+#else
+	tcpci_sink_vbus(tcpc, TCP_VBUS_CTRL_REMOVE, TCPC_VBUS_SINK_0V, 0);
+	tcpci_source_vbus(tcpc, TCP_VBUS_CTRL_REMOVE, TCPC_VBUS_SOURCE_0V, 0);
+#endif
+	hisilog_err("%s: !!!-----------\n", __func__);
+	return 0;
+}
+
+static inline int tcpci_source_vbus(
+ struct tcpc_device *tcpc, u8 type, int mv, int ma)
+{
+ struct pd_dpm_vbus_state vbus_state;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (type >= TCP_VBUS_CTRL_PD && tcpc->pd_port.pd_prev_connected)
+ type |= TCP_VBUS_CTRL_PD_DETECT;
+#endif
+
+ if (ma < 0) {
+ if (mv != 0) {
+ switch (tcpc->typec_local_rp_level) {
+ case TYPEC_CC_RP_1_5:
+ ma = 1500;
+ break;
+ case TYPEC_CC_RP_3_0:
+ ma = 3000;
+ break;
+ default:
+ case TYPEC_CC_RP_DFT:
+ ma = 500;
+ break;
+ }
+ } else {
+ ma = 0;
+ }
+ }
+
+ vbus_state.ma = ma;
+ vbus_state.mv = mv;
+ vbus_state.vbus_type = type;
+
+	TCPC_DBG("source_vbus: %d mV, %d mA\r\n", mv, ma);
+ pd_dpm_handle_pe_event(PD_DPM_PE_EVT_SOURCE_VBUS, &vbus_state);
+ return 0;
+}
+
+static inline int tcpci_sink_vbus(
+ struct tcpc_device *tcpc, u8 type, int mv, int ma)
+{
+ struct pd_dpm_vbus_state vbus_state;
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ if (type >= TCP_VBUS_CTRL_PD && tcpc->pd_port.pd_prev_connected)
+ type |= TCP_VBUS_CTRL_PD_DETECT;
+#endif
+
+ if (ma < 0) {
+ if (mv != 0) {
+ switch (tcpc->typec_remote_rp_level) {
+ case TYPEC_CC_VOLT_SNK_1_5:
+ ma = 1500;
+ break;
+ case TYPEC_CC_VOLT_SNK_3_0:
+ ma = 3000;
+ break;
+ default:
+ case TYPEC_CC_VOLT_SNK_DFT:
+ ma = 500;
+ break;
+ }
+ } else {
+ ma = 0;
+ }
+ }
+
+ vbus_state.ma = ma;
+ vbus_state.mv = mv;
+ vbus_state.vbus_type = type;
+
+	TCPC_DBG("sink_vbus: %d mV, %d mA\r\n", mv, ma);
+ pd_dpm_handle_pe_event(PD_DPM_PE_EVT_SINK_VBUS, &vbus_state);
+ return 0;
+}
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+static inline int tcpci_enter_mode(struct tcpc_device *tcpc,
+ u16 svid, u8 ops, u32 mode)
+{
+ /* DFP_U : DisplayPort Mode, USB Configuration */
+ TCPC_INFO("EnterMode\r\n");
+ return 0;
+}
+
+static inline int tcpci_exit_mode(
+ struct tcpc_device *tcpc, u16 svid)
+{
+ TCPC_INFO("ExitMode\r\n");
+ return 0;
+}
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#endif /* #ifndef __LINUX_RT_TCPC_H */
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci_config.h b/include/linux/hisi/usb/pd/richtek/tcpci_config.h
new file mode 100644
index 000000000000..52037d2b1bd4
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci_config.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_TCPC_CONFIG_H
+#define __LINUX_TCPC_CONFIG_H
+
+/* default config */
+
+#define CONFIG_RT_REGMAP
+
+#define CONFIG_TYPEC_USE_DIS_VBUS_CTRL
+#define CONFIG_TYPEC_POWER_CTRL_INIT
+
+#define CONFIG_TYPEC_CAP_TRY_SOURCE
+#define CONFIG_TYPEC_CAP_TRY_SINK
+
+#define CONFIG_TYPEC_CAP_DBGACC_SNK
+#define CONFIG_TYPEC_CAP_CUSTOM_SRC
+
+#define CONFIG_TYPEC_ATTACHED_SRC_SAFE0V_TIMEOUT
+
+#define CONFIG_TYPEC_CHECK_LEGACY_CABLE
+
+#define CONFIG_TYPEC_CAP_RA_DETACH
+#define CONFIG_TYPEC_CAP_LPM_WAKEUP_WATCHDOG
+
+#define CONFIG_TYPEC_CAP_POWER_OFF_CHARGE
+
+#define CONFIG_TCPC_VSAFE0V_DETECT
+#define CONFIG_TCPC_VSAFE0V_DETECT_IC
+#define CONFIG_TCPC_LOW_POWER_MODE
+#define CONFIG_TCPC_CLOCK_GATING
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+#define CONFIG_USB_PD_SRC_STARTUP_DISCOVER_ID
+#define CONFIG_USB_PD_DFP_READY_DISCOVER_ID
+
+#define CONFIG_USB_PD_ATTEMP_DISCOVER_ID
+#define CONFIG_USB_PD_ATTEMP_DISCOVER_SVID
+
+#define CONFIG_USB_PD_CUSTOM_DBGACC
+
+#define CONFIG_USB_PD_SNK_DFT_NO_GOOD_CRC
+
+#define CONFIG_USB_PD_IGNORE_HRESET_COMPLETE_TIMER
+#define CONFIG_USB_PD_DROP_REPEAT_PING
+#define CONFIG_USB_PD_RETRY_CRC_DISCARD
+#define CONFIG_USB_PD_TRANSMIT_BIST2
+#define CONFIG_USB_PD_POSTPONE_VDM
+#define CONFIG_USB_PD_POSTPONE_RETRY_VDM
+#define CONFIG_USB_PD_POSTPONE_FIRST_VDM
+#define CONFIG_USB_PD_POSTPONE_OTHER_VDM
+#define CONFIG_USB_PD_SAFE0V_TIMEOUT
+
+#ifndef CONFIG_USB_PD_DFP_FLOW_RETRY_MAX
+#define CONFIG_USB_PD_DFP_FLOW_RETRY_MAX 2
+#endif /* CONFIG_USB_PD_DFP_FLOW_RETRY_MAX */
+
+#ifndef CONFIG_USB_PD_VBUS_STABLE_TOUT
+#define CONFIG_USB_PD_VBUS_STABLE_TOUT 125
+#endif /* CONFIG_USB_PD_VBUS_STABLE_TOUT */
+
+#ifndef CONFIG_USB_PD_VBUS_PRESENT_TOUT
+#define CONFIG_USB_PD_VBUS_PRESENT_TOUT 20
+#endif /* CONFIG_USB_PD_VBUS_PRESENT_TOUT */
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+#endif /* __LINUX_TCPC_CONFIG_H */
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci_core.h b/include/linux/hisi/usb/pd/richtek/tcpci_core.h
new file mode 100644
index 000000000000..e7d9a321a260
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci_core.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_RT_TCPCI_CORE_H
+#define __LINUX_RT_TCPCI_CORE_H
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/semaphore.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpm.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_timer.h>
+#include <linux/hisi/usb/pd/richtek/tcpci_config.h>
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+#include <linux/hisi/usb/pd/richtek/pd_core.h>
+#endif
+
+/* Log message switches */
+#define TYPEC_INFO_ENABLE 1
+#define PE_EVENT_DBG_ENABLE 0
+#define PE_STATE_INFO_ENABLE 0
+#define TCPC_INFO_ENABLE 1
+#define TCPC_TIMER_DBG_EN 0
+#define TCPC_TIMER_INFO_EN 0
+#define PE_INFO_ENABLE 1
+#define TCPC_DBG_ENABLE 0
+#define DPM_DBG_ENABLE 0
+#define PD_ERR_ENABLE 1
+#define PE_DBG_ENABLE 0
+#define TYPEC_DBG_ENABLE 0
+
+#define TCPC_ENABLE_ANYMSG (TCPC_DBG_ENABLE | DPM_DBG_ENABLE | \
+ PD_ERR_ENABLE | PE_INFO_ENABLE | TCPC_TIMER_INFO_EN\
+ | PE_DBG_ENABLE | PE_EVENT_DBG_ENABLE | \
+ PE_STATE_INFO_ENABLE | TCPC_INFO_ENABLE | \
+ TCPC_TIMER_DBG_EN | TYPEC_DBG_ENABLE | \
+ TYPEC_INFO_ENABLE)
+
+#define PE_EVT_INFO_VDM_DIS 0
+#define PE_DBG_RESET_VDM_DIS 1
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+struct tcpc_device;
+
+struct tcpc_desc {
+ u8 role_def;
+ u8 rp_lvl;
+ int notifier_supply_num;
+ char *name;
+};
+
+/* TCPC Power Register Define */
+#define TCPC_REG_POWER_STATUS_EXT_VSAFE0V BIT(15) /* extend */
+#define TCPC_REG_POWER_STATUS_VBUS_PRES BIT(2)
+
+/* TCPC Alert Register Define */
+#define TCPC_REG_ALERT_EXT_RA_DETACH (1 << (16 + 5))
+#define TCPC_REG_ALERT_EXT_WATCHDOG (1 << (16 + 2))
+#define TCPC_REG_ALERT_EXT_VBUS_80 (1 << (16 + 1))
+#define TCPC_REG_ALERT_EXT_WAKEUP (1 << (16 + 0))
+
+#define TCPC_REG_ALERT_VBUS_DISCNCT BIT(11)
+#define TCPC_REG_ALERT_RX_BUF_OVF BIT(10)
+#define TCPC_REG_ALERT_FAULT BIT(9)
+#define TCPC_REG_ALERT_V_ALARM_LO BIT(8)
+#define TCPC_REG_ALERT_V_ALARM_HI BIT(7)
+#define TCPC_REG_ALERT_TX_SUCCESS BIT(6)
+#define TCPC_REG_ALERT_TX_DISCARDED BIT(5)
+#define TCPC_REG_ALERT_TX_FAILED BIT(4)
+#define TCPC_REG_ALERT_RX_HARD_RST BIT(3)
+#define TCPC_REG_ALERT_RX_STATUS BIT(2)
+#define TCPC_REG_ALERT_POWER_STATUS BIT(1)
+#define TCPC_REG_ALERT_CC_STATUS BIT(0)
+#define TCPC_REG_ALERT_TX_COMPLETE (TCPC_REG_ALERT_TX_SUCCESS | \
+ TCPC_REG_ALERT_TX_DISCARDED | \
+ TCPC_REG_ALERT_TX_FAILED)
+
+/* TCPC Behavior Flags */
+#define TCPC_FLAGS_RETRY_CRC_DISCARD BIT(0)
+#define TCPC_FLAGS_WAIT_HRESET_COMPLETE BIT(1)
+#define TCPC_FLAGS_CHECK_CC_STABLE BIT(2)
+#define TCPC_FLAGS_LPM_WAKEUP_WATCHDOG BIT(3)
+#define TCPC_FLAGS_CHECK_RA_DETACHE BIT(4)
+
+enum tcpc_cc_pull {
+ TYPEC_CC_RA = 0,
+ TYPEC_CC_RP = 1,
+ TYPEC_CC_RD = 2,
+ TYPEC_CC_OPEN = 3,
+ TYPEC_CC_DRP = 4, /* from Rd */
+
+ TYPEC_CC_RP_DFT = 1, /* 0x00 + 1 */
+ TYPEC_CC_RP_1_5 = 9, /* 0x08 + 1*/
+ TYPEC_CC_RP_3_0 = 17, /* 0x10 + 1 */
+
+ TYPEC_CC_DRP_DFT = 4, /* 0x00 + 4 */
+ TYPEC_CC_DRP_1_5 = 12, /* 0x08 + 4 */
+ TYPEC_CC_DRP_3_0 = 20, /* 0x10 + 4 */
+};
+
+#define TYPEC_CC_PULL_GET_RES(pull) ((pull) & 0x07)
+#define TYPEC_CC_PULL_GET_RP_LVL(pull) (((pull) & 0x18) >> 3)
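+
+/*
+ * e.g. TYPEC_CC_RP_1_5 (= 9 = 0x09): TYPEC_CC_PULL_GET_RES(9) = 1
+ * (TYPEC_CC_RP) and TYPEC_CC_PULL_GET_RP_LVL(9) = 1 (the 1.5A level).
+ */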
+
+enum tcpm_transmit_type {
+ TCPC_TX_SOP = 0,
+ TCPC_TX_SOP_PRIME = 1,
+ TCPC_TX_SOP_PRIME_PRIME = 2,
+ TCPC_TX_SOP_DEBUG_PRIME = 3,
+ TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4,
+ TCPC_TX_HARD_RESET = 5,
+ TCPC_TX_CABLE_RESET = 6,
+ TCPC_TX_BIST_MODE_2 = 7
+};
+
+enum tcpm_rx_cap_type {
+ TCPC_RX_CAP_SOP = 1 << 0,
+ TCPC_RX_CAP_SOP_PRIME = 1 << 1,
+ TCPC_RX_CAP_SOP_PRIME_PRIME = 1 << 2,
+ TCPC_RX_CAP_SOP_DEBUG_PRIME = 1 << 3,
+ TCPC_RX_CAP_SOP_DEBUG_PRIME_PRIME = 1 << 4,
+ TCPC_RX_CAP_HARD_RESET = 1 << 5,
+ TCPC_RX_CAP_CABLE_RESET = 1 << 6,
+};
+
+struct tcpc_ops {
+ int (*init)(struct tcpc_device *tcpc, bool sw_reset);
+ int (*alert_status_clear)(struct tcpc_device *tcpc, u32 mask);
+ int (*fault_status_clear)(struct tcpc_device *tcpc, u8 status);
+ int (*get_alert_status)(struct tcpc_device *tcpc, u32 *alert);
+ int (*get_power_status)(struct tcpc_device *tcpc, u16 *pwr_status);
+ int (*get_fault_status)(struct tcpc_device *tcpc, u8 *status);
+ int (*get_cc)(struct tcpc_device *tcpc, int *cc1, int *cc2);
+ int (*set_cc)(struct tcpc_device *tcpc, int pull);
+ int (*set_polarity)(struct tcpc_device *tcpc, int polarity);
+ int (*set_vconn)(struct tcpc_device *tcpc, int enable);
+
+#ifdef CONFIG_TCPC_LOW_POWER_MODE
+ int (*set_low_power_mode)(struct tcpc_device *tcpc, bool en, int pull);
+#endif /* CONFIG_TCPC_LOW_POWER_MODE */
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ int (*set_msg_header)(struct tcpc_device *tcpc,
+ int power_role, int data_role);
+ int (*set_rx_enable)(struct tcpc_device *tcpc, u8 enable);
+ int (*get_message)(struct tcpc_device *tcpc, u32 *payload,
+ u16 *head, enum tcpm_transmit_type *type);
+ int (*transmit)(struct tcpc_device *tcpc,
+ enum tcpm_transmit_type type,
+ u16 header, const u32 *data);
+ int (*set_bist_test_mode)(struct tcpc_device *tcpc, bool en);
+ int (*set_bist_carrier_mode)(struct tcpc_device *tcpc, u8 pattern);
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ int (*retransmit)(struct tcpc_device *tcpc);
+#endif /* CONFIG_USB_PD_RETRY_CRC_DISCARD */
+#endif /* CONFIG_USB_POWER_DELIVERY */
+};
+
+#define TCPC_VBUS_SOURCE_0V (0)
+#define TCPC_VBUS_SOURCE_5V (5000)
+
+#define TCPC_VBUS_SINK_0V (0)
+#define TCPC_VBUS_SINK_5V (5000)
+
+#define TCPC_LEGACY_CABLE_CONFIRM 50
+
+struct tcpc_device {
+ struct i2c_client *client;
+ struct tcpc_ops *ops;
+ void *drv_data;
+ struct tcpc_desc desc;
+ struct device dev;
+ struct wakeup_source attach_wake_lock;
+ struct wakeup_source dettach_temp_wake_lock;
+ /* For tcpc timer & event */
+ u32 timer_handle_index;
+ struct hrtimer tcpc_timer[PD_TIMER_NR];
+
+ ktime_t last_expire[PD_TIMER_NR];
+ struct delayed_work timer_handle_work[2];
+ struct mutex access_lock;
+ struct mutex typec_lock;
+ struct mutex timer_lock;
+ struct semaphore timer_enable_mask_lock;
+ struct semaphore timer_tick_lock;
+ atomic_t pending_event;
+ u64 timer_tick;
+ u64 timer_enable_mask;
+ wait_queue_head_t event_loop_wait_que;
+ wait_queue_head_t timer_wait_que;
+ struct task_struct *event_task;
+ struct task_struct *timer_task;
+ bool timer_thead_stop;
+ bool event_loop_thead_stop;
+
+ struct delayed_work init_work;
+ struct srcu_notifier_head evt_nh;
+
+ /* For TCPC TypeC */
+ u8 typec_state;
+ u8 typec_role;
+ u8 typec_attach_old;
+ u8 typec_attach_new;
+ u8 typec_local_cc;
+ u8 typec_local_rp_level;
+ u8 typec_remote_cc[2];
+ u8 typec_remote_rp_level;
+ u8 typec_wait_ps_change;
+ bool typec_polarity;
+ bool typec_drp_try_timeout;
+ bool typec_lpm;
+ bool typec_cable_only;
+ bool typec_power_ctrl;
+
+#ifdef CONFIG_TYPEC_CHECK_LEGACY_CABLE
+ bool typec_legacy_cable;
+ u8 typec_legacy_cable_suspect;
+#endif /* CONFIG_TYPEC_CHECK_LEGACY_CABLE */
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+ /* Event */
+ u8 pd_event_count;
+ u8 pd_event_head_index;
+ u8 pd_msg_buffer_allocated;
+
+ u8 pd_last_vdm_msg_id;
+ bool pd_pending_vdm_event;
+ bool pd_pending_vdm_good_crc;
+ bool pd_postpone_vdm_timeout;
+
+ pd_msg_t pd_last_vdm_msg;
+ pd_event_t pd_vdm_event;
+
+ pd_msg_t pd_msg_buffer[PD_MSG_BUF_SIZE];
+ pd_event_t pd_event_ring_buffer[PD_EVENT_BUF_SIZE];
+
+ bool pd_wait_pe_idle;
+ bool pd_hard_reset_event_pending;
+ bool pd_wait_hard_reset_complete;
+ bool pd_wait_pr_swap_complete;
+ bool pd_wait_error_recovery;
+ bool pd_ping_event_pending;
+ u8 pd_bist_mode;
+ u8 pd_transmit_state;
+ int pd_wait_vbus_once;
+
+#ifdef CONFIG_USB_PD_RETRY_CRC_DISCARD
+ bool pd_discard_pending;
+#endif
+
+ u8 tcpc_flags;
+
+ pd_port_t pd_port;
+#endif /* CONFIG_USB_POWER_DELIVERY */
+ u8 vbus_level:2;
+ u8 irq_enabled:1;
+
+ struct notifier_block dpm_nb;
+};
+
+#define to_tcpc_device(obj) container_of(obj, struct tcpc_device, dev)
+
+#define RT_DBG_INFO hisilog_info
+#define RT_DBG_ERR hisilog_err
+
+#if TYPEC_DBG_ENABLE
+#define TYPEC_DBG(format, args...) \
+ RT_DBG_INFO("[TPC-D]" format, ##args)
+#else
+#define TYPEC_DBG(format, args...)
+#endif /* TYPEC_DBG_ENABLE */
+
+#if TYPEC_INFO_ENABLE
+#define TYPEC_INFO(format, args...) \
+ RT_DBG_INFO("TPC-I:" format, ##args)
+#else
+#define TYPEC_INFO(format, args...)
+#endif /* TYPEC_INFO_ENABLE */
+
+#if TCPC_INFO_ENABLE
+#define TCPC_INFO(format, args...) \
+ RT_DBG_INFO("[TCPC-I]" format, ##args)
+#else
+#define TCPC_INFO(format, args...)
+#endif /* TCPC_INFO_ENABLE */
+
+#if TCPC_DBG_ENABLE
+#define TCPC_DBG(format, args...) \
+ RT_DBG_INFO("[TCPC-D]" format, ##args)
+#else
+#define TCPC_DBG(format, args...)
+#endif /* TCPC_DBG_ENABLE */
+
+#define TCPC_ERR(format, args...) \
+ RT_DBG_ERR("[TCPC-E]" format, ##args)
+
+#define DP_ERR(format, args...) \
+ RT_DBG_ERR("[DP-E]" format, ##args)
+
+#if DPM_DBG_ENABLE
+#define DPM_DBG(format, args...) \
+ RT_DBG_INFO("DPM-D:" format, ##args)
+#else
+#define DPM_DBG(format, args...)
+#endif /* DPM_DBG_ENABLE */
+
+#if PD_ERR_ENABLE
+#define PD_ERR(format, args...) \
+ RT_DBG_ERR("PD-E:" format, ##args)
+#else
+#define PD_ERR(format, args...)
+#endif /* PD_ERR_ENABLE */
+
+#if PE_INFO_ENABLE
+#define PE_INFO(format, args...) \
+ RT_DBG_INFO("PE:" format, ##args)
+#else
+#define PE_INFO(format, args...)
+#endif /* PE_INFO_ENABLE */
+
+#if PE_EVENT_DBG_ENABLE
+#define PE_EVT_INFO(format, args...) \
+ RT_DBG_INFO("PE-E:" format, ##args)
+#else
+#define PE_EVT_INFO(format, args...)
+#endif /* PE_EVENT_DBG_ENABLE */
+
+#if PE_DBG_ENABLE
+#define PE_DBG(format, args...) \
+ RT_DBG_INFO("PE:" format, ##args)
+#else
+#define PE_DBG(format, args...)
+#endif /* PE_DBG_ENABLE */
+
+#if PE_STATE_INFO_ENABLE
+#define PE_STATE_INFO(format, args...) \
+ RT_DBG_INFO("PE:" format, ##args)
+#else
+#define PE_STATE_INFO(format, args...)
+#endif /* PE_STATE_INFO_ENABLE */
+
+#endif /* #ifndef __LINUX_RT_TCPCI_CORE_H */
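For reference, a minimal sketch of how these switches and macros are meant to be used, assuming only the declarations above; example_check_vbus() and its messages are illustrative and not part of the patch:

	/* Illustrative only: shows the conditional log macros in use. */
	static int example_check_vbus(struct tcpc_device *tcpc)
	{
		u16 power_status;
		int ret;

		ret = tcpc->ops->get_power_status(tcpc, &power_status);
		if (ret < 0) {
			TCPC_ERR("get_power_status failed: %d\n", ret);
			return ret;
		}

		/* Compiled out unless TCPC_DBG_ENABLE is switched to 1 above */
		TCPC_DBG("power_status = 0x%04x\n", power_status);

		return !!(power_status & TCPC_REG_POWER_STATUS_VBUS_PRES);
	}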
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci_event.h b/include/linux/hisi/usb/pd/richtek/tcpci_event.h
new file mode 100644
index 000000000000..d2d9b39b06ad
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci_event.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TCPC_EVENT_BUF_H_INCLUDED
+#define TCPC_EVENT_BUF_H_INCLUDED
+
+#include <linux/hisi/usb/pd/richtek/tcpci_timer.h>
+
+#define PD_MSG_BUF_SIZE (4 * 2)
+#define PD_EVENT_BUF_SIZE (8 * 2)
+
+struct tcpc_device;
+typedef struct __pd_port pd_port_t;
+
+typedef struct __pd_msg {
+ u8 frame_type;
+ u16 msg_hdr;
+ u32 payload[7];
+ unsigned long time_stamp;
+} pd_msg_t;
+
+typedef struct __pd_event {
+ u8 event_type;
+ u8 msg;
+ u8 msg_sec;
+ pd_msg_t *pd_msg;
+} pd_event_t;
+
+pd_msg_t *pd_alloc_msg(struct tcpc_device *tcpc_dev);
+void pd_free_msg(struct tcpc_device *tcpc_dev, pd_msg_t *pd_msg);
+
+bool pd_get_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event);
+bool pd_put_event(struct tcpc_device *tcpc_dev,
+ const pd_event_t *pd_event, bool from_port_partner);
+void pd_free_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event);
+void pd_event_buf_reset(struct tcpc_device *tcpc_dev);
+
+bool pd_get_vdm_event(struct tcpc_device *tcpc_dev, pd_event_t *pd_event);
+bool pd_put_vdm_event(struct tcpc_device *tcpc_dev,
+ pd_event_t *pd_event, bool from_port_partner);
+
+bool pd_put_last_vdm_event(struct tcpc_device *tcpc_dev);
+
+int tcpci_event_init(struct tcpc_device *tcpc_dev);
+int tcpci_event_deinit(struct tcpc_device *tcpc_dev);
+
+void pd_put_cc_detached_event(struct tcpc_device *tcpc_dev);
+void pd_put_recv_hard_reset_event(struct tcpc_device *tcpc_dev);
+void pd_put_sent_hard_reset_event(struct tcpc_device *tcpc_dev);
+bool pd_put_pd_msg_event(struct tcpc_device *tcpc_dev, pd_msg_t *pd_msg);
+void pd_put_hard_reset_completed_event(struct tcpc_device *tcpc_dev);
+void pd_put_vbus_changed_event(struct tcpc_device *tcpc_dev, bool from_ic);
+void pd_put_vbus_safe0v_event(struct tcpc_device *tcpc_dev);
+void pd_put_vbus_stable_event(struct tcpc_device *tcpc_dev);
+void pd_put_vbus_present_event(struct tcpc_device *tcpc_dev);
+
+enum pd_event_type {
+ PD_EVT_PD_MSG = 0, /* either ctrl msg or data msg */
+ PD_EVT_CTRL_MSG,
+ PD_EVT_DATA_MSG,
+
+ PD_EVT_DPM_MSG,
+ PD_EVT_HW_MSG,
+ PD_EVT_PE_MSG,
+ PD_EVT_TIMER_MSG,
+};
+
+/* Control Message type */
+enum pd_ctrl_msg_type {
+ /* 0 Reserved */
+ PD_CTRL_GOOD_CRC = 1,
+ PD_CTRL_GOTO_MIN = 2,
+ PD_CTRL_ACCEPT = 3,
+ PD_CTRL_REJECT = 4,
+ PD_CTRL_PING = 5,
+ PD_CTRL_PS_RDY = 6,
+ PD_CTRL_GET_SOURCE_CAP = 7,
+ PD_CTRL_GET_SINK_CAP = 8,
+ PD_CTRL_DR_SWAP = 9,
+ PD_CTRL_PR_SWAP = 10,
+ PD_CTRL_VCONN_SWAP = 11,
+ PD_CTRL_WAIT = 12,
+ PD_CTRL_SOFT_RESET = 13,
+ /* 14-15 Reserved */
+ PD_CTRL_MSG_NR,
+};
+
+/* Data message type */
+enum pd_data_msg_type {
+ /* 0 Reserved */
+ PD_DATA_SOURCE_CAP = 1,
+ PD_DATA_REQUEST = 2,
+ PD_DATA_BIST = 3,
+ PD_DATA_SINK_CAP = 4,
+ /* 5-14 Reserved */
+ PD_DATA_VENDOR_DEF = 15,
+ PD_DATA_MSG_NR,
+};
+
+/* HW Message type */
+enum pd_hw_msg_type {
+ PD_HW_CC_DETACHED = 0,
+ PD_HW_CC_ATTACHED,
+ PD_HW_RECV_HARD_RESET,
+ PD_HW_VBUS_PRESENT,
+ PD_HW_VBUS_ABSENT,
+ PD_HW_VBUS_SAFE0V,
+ PD_HW_VBUS_STABLE,
+ PD_HW_TX_FAILED, /* no good crc or discard */
+ PD_HW_RETRY_VDM, /* discard vdm msg */
+ PD_HW_MSG_NR,
+};
+
+/* PE Message type */
+enum pd_pe_msg_type {
+ PD_PE_RESET_PRL_COMPLETED = 0,
+ PD_PE_POWER_ROLE_AT_DEFAULT,
+ PD_PE_HARD_RESET_COMPLETED,
+ PD_PE_IDLE,
+ PD_PE_MSG_NR,
+};
+
+/* DPM Message type */
+
+enum pd_dpm_msg_type {
+ PD_DPM_NOTIFIED = 0,
+ PD_DPM_ACK = PD_DPM_NOTIFIED,
+ PD_DPM_NAK,
+
+ PD_DPM_PD_REQUEST,
+ PD_DPM_VDM_REQUEST,
+
+ PD_DPM_DISCOVER_CABLE_ID,
+ PD_DPM_CAP_CHANGED,
+
+ PD_DPM_ERROR_RECOVERY,
+
+ PD_DPM_MSG_NR,
+};
+
+enum pd_dpm_notify_type {
+ PD_DPM_NOTIFY_OK = 0,
+ PD_DPM_NOTIFY_CAP_MISMATCH,
+};
+
+enum pd_dpm_nak_type {
+ PD_DPM_NAK_REJECT = 0,
+ PD_DPM_NAK_WAIT = 1,
+ PD_DPM_NAK_REJECT_INVALID = 2,
+};
+
+enum pd_dpm_pd_request_type {
+ PD_DPM_PD_REQUEST_PR_SWAP = 0,
+ PD_DPM_PD_REQUEST_DR_SWAP,
+ PD_DPM_PD_REQUEST_VCONN_SWAP,
+ PD_DPM_PD_REQUEST_GOTOMIN,
+
+ PD_DPM_PD_REQUEST_SOFTRESET,
+ PD_DPM_PD_REQUEST_HARDRESET,
+
+ PD_DPM_PD_REQUEST_GET_SOURCE_CAP,
+ PD_DPM_PD_REQUEST_GET_SINK_CAP,
+
+ PD_DPM_PD_REQUEST_PW_REQUEST,
+ PD_DPM_PD_REQUEST_BIST_CM2,
+ PD_DPM_PD_REQUEST_NR,
+};
+
+enum pd_tx_transmit_state {
+ PD_TX_STATE_GOOD_CRC = 0,
+ PD_TX_STATE_NO_GOOD_CRC,
+ PD_TX_STATE_DISCARD,
+ PD_TX_STATE_HARD_RESET,
+ PD_TX_STATE_NO_RESPONSE,
+
+ PD_TX_STATE_WAIT,
+ PD_TX_STATE_WAIT_CRC_VDM = PD_TX_STATE_WAIT,
+ PD_TX_STATE_WAIT_CRC_PD,
+ PD_TX_STATE_WAIT_HARD_RESET,
+};
+
+static inline bool pd_event_msg_match(pd_event_t *pd_event,
+				      u8 type, u8 msg)
+{
+ if (pd_event->event_type != type)
+ return false;
+
+ return (pd_event->msg == msg);
+}
+
+#endif /* TCPC_EVENT_BUF_H_INCLUDED */
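A hedged sketch of a consumer of the event-buffer API above; the handler below is illustrative and only uses declarations from this header:

	/* Illustrative consumer loop for the PD event buffer. */
	static void example_handle_one_event(struct tcpc_device *tcpc_dev)
	{
		pd_event_t evt;

		if (!pd_get_event(tcpc_dev, &evt))
			return;		/* nothing queued */

		/* e.g. react to an Accept control message */
		if (pd_event_msg_match(&evt, PD_EVT_CTRL_MSG, PD_CTRL_ACCEPT))
			;		/* a policy engine would advance its state here */

		/* release any pd_msg_t still attached to the event */
		pd_free_event(tcpc_dev, &evt);
	}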
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci_timer.h b/include/linux/hisi/usb/pd/richtek/tcpci_timer.h
new file mode 100644
index 000000000000..3a38849c2c17
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci_timer.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TCPC_TIMER_H_INCLUDED
+#define TCPC_TIMER_H_INCLUDED
+
+#include <linux/kernel.h>
+
+struct tcpc_device;
+enum {
+#ifdef CONFIG_USB_POWER_DELIVERY
+ PD_TIMER_BIST_CONT_MODE = 0,
+ PD_TIMER_DISCOVER_ID,
+ PD_TIMER_HARD_RESET_COMPLETE,
+ PD_TIMER_NO_RESPONSE,
+ PD_TIMER_PS_HARD_RESET,
+ PD_TIMER_PS_SOURCE_OFF,
+ PD_TIMER_PS_SOURCE_ON,
+ PD_TIMER_PS_TRANSITION,
+ PD_TIMER_SENDER_RESPONSE,
+ PD_TIMER_SINK_ACTIVITY,
+ PD_TIMER_SINK_REQUEST,
+ PD_TIMER_SINK_WAIT_CAP,
+ PD_TIMER_SOURCE_ACTIVITY,
+ PD_TIMER_SOURCE_CAPABILITY,
+ PD_TIMER_SOURCE_START,
+ PD_TIMER_VCONN_ON,
+ PD_TIMER_VDM_MODE_ENTRY,
+ PD_TIMER_VDM_MODE_EXIT,
+ PD_TIMER_VDM_RESPONSE,
+ PD_TIMER_SOURCE_TRANSITION,
+ PD_TIMER_SRC_RECOVER,
+ PD_TIMER_VSAFE0V_DELAY,
+ PD_TIMER_VSAFE0V_TOUT,
+ PD_TIMER_DISCARD,
+ PD_TIMER_VBUS_STABLE,
+ PD_TIMER_VBUS_PRESENT,
+ PD_PE_VDM_POSTPONE,
+ PD_PE_TIMER_END_ID,
+
+ /* TYPEC-RT-TIMER */
+ TYPEC_RT_TIMER_START_ID = PD_PE_TIMER_END_ID,
+ TYPEC_RT_TIMER_PE_IDLE = TYPEC_RT_TIMER_START_ID,
+ TYPEC_RT_TIMER_SAFE0V_DELAY,
+ TYPEC_RT_TIMER_SAFE0V_TOUT,
+
+ /* TYPEC-TRY-TIMER */
+ TYPEC_TRY_TIMER_START_ID,
+ TYPEC_TRY_TIMER_DRP_TRY = TYPEC_TRY_TIMER_START_ID,
+ TYPEC_TRY_TIMER_DRP_TRYWAIT,
+
+ /* TYPEC-DEBOUNCE-TIMER */
+ TYPEC_TIMER_START_ID,
+ TYPEC_TIMER_CCDEBOUNCE = TYPEC_TIMER_START_ID,
+ TYPEC_TIMER_PDDEBOUNCE,
+ TYPEC_TIMER_ERROR_RECOVERY,
+ TYPEC_TIMER_WAKEUP,
+ TYPEC_TIMER_DRP_SRC_TOGGLE,
+#else
+ TYPEC_RT_TIMER_START_ID = 0,
+ TYPEC_RT_TIMER_SAFE0V_DELAY = TYPEC_RT_TIMER_START_ID,
+ TYPEC_RT_TIMER_SAFE0V_TOUT,
+
+ TYPEC_TRY_TIMER_START_ID,
+ TYPEC_TRY_TIMER_DRP_TRY = TYPEC_TRY_TIMER_START_ID,
+ TYPEC_TRY_TIMER_DRP_TRYWAIT,
+
+ TYPEC_TIMER_START_ID,
+ TYPEC_TIMER_CCDEBOUNCE = TYPEC_TIMER_START_ID,
+ TYPEC_TIMER_PDDEBOUNCE,
+ TYPEC_TIMER_WAKEUP,
+ TYPEC_TIMER_DRP_SRC_TOGGLE,
+#endif /* CONFIG_USB_POWER_DELIVERY */
+ PD_TIMER_NR,
+};
+
+int tcpci_timer_init(struct tcpc_device *tcpc);
+int tcpci_timer_deinit(struct tcpc_device *tcpc);
+void tcpc_restart_timer(struct tcpc_device *tcpc, u32 timer_id);
+void tcpc_enable_timer(struct tcpc_device *tcpc, u32 timer_id);
+void tcpc_disable_timer(
+ struct tcpc_device *tcpc, u32 timer_id);
+void tcpc_reset_typec_try_timer(struct tcpc_device *tcpc);
+void tcpc_reset_typec_debounce_timer(struct tcpc_device *tcpc);
+
+void tcpc_reset_pe_timer(struct tcpc_device *tcpc);
+
+#endif /* TCPC_TIMER_H_INCLUDED */
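The timer IDs above index the per-port hrtimer array; a minimal, illustrative sketch of arming and cancelling one of them:

	/* Illustrative: (re)arm the CC debounce timer, cancel it on detach. */
	static void example_cc_debounce(struct tcpc_device *tcpc, bool attached)
	{
		if (attached)
			tcpc_enable_timer(tcpc, TYPEC_TIMER_CCDEBOUNCE);
		else
			tcpc_disable_timer(tcpc, TYPEC_TIMER_CCDEBOUNCE);
	}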
diff --git a/include/linux/hisi/usb/pd/richtek/tcpci_typec.h b/include/linux/hisi/usb/pd/richtek/tcpci_typec.h
new file mode 100644
index 000000000000..9e9d8b266624
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpci_typec.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_TCPCI_TYPEC_H
+#define __LINUX_TCPCI_TYPEC_H
+#include <linux/hisi/usb/pd/richtek/tcpci.h>
+
+struct tcpc_device;
+
+/******************************************************************************
+ * Call the following functions to trigger a Type-C connection state change:
+ *
+ * 1. H/W -> CC/PS change
+ * 2. Timer -> CCDebounce, PDDebounce or other timeouts
+ * 3. Policy Engine -> PR_SWAP, Error_Recovery, PE_Idle
+ *****************************************************************************/
+
+int tcpc_typec_handle_cc_change(
+ struct tcpc_device *tcpc_dev);
+
+int tcpc_typec_handle_ps_change(
+ struct tcpc_device *tcpc_dev, int vbus_level);
+
+int tcpc_typec_handle_timeout(
+ struct tcpc_device *tcpc_dev, u32 timer_id);
+
+int tcpc_typec_handle_vsafe0v(struct tcpc_device *tcpc_dev);
+
+int tcpc_typec_set_rp_level(struct tcpc_device *tcpc_dev, u8 res);
+
+int tcpc_typec_change_role(
+ struct tcpc_device *tcpc_dev, u8 typec_role);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+int tcpc_typec_advertise_explicit_contract(struct tcpc_device *tcpc_dev);
+int tcpc_typec_handle_pe_pr_swap(struct tcpc_device *tcpc_dev);
+#else
+int tcpc_typec_swap_role(struct tcpc_device *tcpc_dev);
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+#endif /* #ifndef __LINUX_TCPCI_TYPEC_H */
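A hedged sketch of how an alert handler might feed trigger source 1 above; the alert bit names are assumed to be visible here via tcpci.h (they are defined in tcpci_core.h), and the function itself is illustrative:

	/* Illustrative: route alert bits into the Type-C state machine. */
	static void example_handle_alert(struct tcpc_device *tcpc_dev,
					 u32 alert, int vbus_level)
	{
		if (alert & TCPC_REG_ALERT_CC_STATUS)
			tcpc_typec_handle_cc_change(tcpc_dev);

		if (alert & TCPC_REG_ALERT_POWER_STATUS)
			tcpc_typec_handle_ps_change(tcpc_dev, vbus_level);
	}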
diff --git a/include/linux/hisi/usb/pd/richtek/tcpm.h b/include/linux/hisi/usb/pd/richtek/tcpm.h
new file mode 100644
index 000000000000..af3316cb50f8
--- /dev/null
+++ b/include/linux/hisi/usb/pd/richtek/tcpm.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2016 Richtek Technology Corp.
+ *
+ * Author: TH <tsunghan_tsai@richtek.com>
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TCPM_H_
+#define TCPM_H_
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+#include <linux/hisi/usb/pd/richtek/tcpci_config.h>
+
+#include <linux/hisi/usb/hisi_pd_dev.h>
+#include <linux/hisi/log/hisi_log.h>
+
+#ifndef HISILOG_TAG
+#define HISILOG_TAG hisi_pd
+HISILOG_REGIST();
+#endif
+
+struct tcpc_device;
+
+/*
+ * Type-C Port Notify Chain
+ */
+
+enum typec_attach_type {
+ TYPEC_UNATTACHED = 0,
+ TYPEC_ATTACHED_SNK,
+ TYPEC_ATTACHED_SRC,
+ TYPEC_ATTACHED_AUDIO,
+ TYPEC_ATTACHED_DEBUG,
+
+#ifdef CONFIG_TYPEC_CAP_DBGACC_SNK
+ TYPEC_ATTACHED_DBGACC_SNK, /* Rp, Rp */
+#endif /* CONFIG_TYPEC_CAP_DBGACC_SNK */
+
+#ifdef CONFIG_TYPEC_CAP_CUSTOM_SRC
+ TYPEC_ATTACHED_CUSTOM_SRC, /* Same Rp */
+#endif /* CONFIG_TYPEC_CAP_CUSTOM_SRC */
+};
+
+enum dpm_request_state {
+ DPM_REQ_NULL,
+ DPM_REQ_QUEUE,
+ DPM_REQ_RUNNING,
+ DPM_REQ_SUCCESS,
+ DPM_REQ_FAILED,
+
+ /* Request failed */
+
+ DPM_REQ_ERR_IDLE = DPM_REQ_FAILED,
+
+ DPM_REQ_ERR_NOT_READY,
+ DPM_REQ_ERR_WRONG_ROLE,
+
+ DPM_REQ_ERR_RECV_HRESET,
+ DPM_REQ_ERR_RECV_SRESET,
+ DPM_REQ_ERR_SEND_HRESET,
+ DPM_REQ_ERR_SEND_SRESET,
+ DPM_REQ_ERR_SEND_BIST,
+
+ /* Internal */
+ DPM_REQ_SUCCESS_CODE,
+
+ DPM_REQ_E_UVDM_ACK,
+ DPM_REQ_E_UVDM_NAK,
+};
+
+/* Power role */
+#define PD_ROLE_SINK 0
+#define PD_ROLE_SOURCE 1
+
+/* Data role */
+#define PD_ROLE_UFP 0
+#define PD_ROLE_DFP 1
+
+/* Vconn role */
+#define PD_ROLE_VCONN_OFF 0
+#define PD_ROLE_VCONN_ON 1
+
+enum {
+ TCP_NOTIFY_DIS_VBUS_CTRL,
+ TCP_NOTIFY_SOURCE_VCONN,
+ TCP_NOTIFY_SOURCE_VBUS,
+ TCP_NOTIFY_SINK_VBUS,
+ TCP_NOTIFY_PR_SWAP,
+ TCP_NOTIFY_DR_SWAP,
+ TCP_NOTIFY_VCONN_SWAP,
+ TCP_NOTIFY_ENTER_MODE,
+ TCP_NOTIFY_EXIT_MODE,
+ TCP_NOTIFY_AMA_DP_STATE,
+ TCP_NOTIFY_AMA_DP_ATTENTION,
+ TCP_NOTIFY_AMA_DP_HPD_STATE,
+
+ TCP_NOTIFY_TYPEC_STATE,
+ TCP_NOTIFY_PD_STATE,
+};
+
+struct tcp_ny_pd_state {
+ u8 connected;
+};
+
+struct tcp_ny_swap_state {
+ u8 new_role;
+};
+
+struct tcp_ny_enable_state {
+ bool en;
+};
+
+struct tcp_ny_typec_state {
+ u8 rp_level;
+ u8 polarity;
+ u8 old_state;
+ u8 new_state;
+};
+
+enum {
+ TCP_VBUS_CTRL_REMOVE = 0,
+ TCP_VBUS_CTRL_TYPEC = 1,
+ TCP_VBUS_CTRL_PD = 2,
+
+ TCP_VBUS_CTRL_HRESET = TCP_VBUS_CTRL_PD,
+ TCP_VBUS_CTRL_PR_SWAP = 3,
+ TCP_VBUS_CTRL_REQUEST = 4,
+
+ TCP_VBUS_CTRL_PD_DETECT = (1 << 7),
+
+ TCP_VBUS_CTRL_PD_HRESET =
+ TCP_VBUS_CTRL_HRESET | TCP_VBUS_CTRL_PD_DETECT,
+
+ TCP_VBUS_CTRL_PD_PR_SWAP =
+ TCP_VBUS_CTRL_PR_SWAP | TCP_VBUS_CTRL_PD_DETECT,
+
+ TCP_VBUS_CTRL_PD_REQUEST =
+ TCP_VBUS_CTRL_REQUEST | TCP_VBUS_CTRL_PD_DETECT,
+};
+
+struct tcp_ny_vbus_state {
+ int mv;
+ int ma;
+ u8 type;
+};
+
+struct tcp_ny_mode_ctrl {
+ u16 svid;
+ u8 ops;
+ u32 mode;
+};
+
+enum {
+ SW_USB = 0,
+ SW_DFP_D,
+ SW_UFP_D,
+};
+
+struct tcp_ny_ama_dp_state {
+ u8 sel_config;
+ u8 signal;
+ u8 pin_assignment;
+ u8 polarity;
+ u8 active;
+};
+
+enum {
+ TCP_DP_UFP_U_MASK = 0x7C,
+ TCP_DP_UFP_U_POWER_LOW = 1 << 2,
+ TCP_DP_UFP_U_ENABLED = 1 << 3,
+ TCP_DP_UFP_U_MF_PREFER = 1 << 4,
+ TCP_DP_UFP_U_USB_CONFIG = 1 << 5,
+ TCP_DP_UFP_U_EXIT_MODE = 1 << 6,
+};
+
+struct tcp_ny_ama_dp_attention {
+ u8 state;
+};
+
+struct tcp_ny_ama_dp_hpd_state {
+ bool irq : 1;
+ bool state : 1;
+};
+
+struct tcp_ny_uvdm {
+ bool ack;
+ u8 uvdm_cnt;
+ u16 uvdm_svid;
+ u32 *uvdm_data;
+};
+
+struct tcp_notify {
+ union {
+ struct tcp_ny_enable_state en_state;
+ struct tcp_ny_vbus_state vbus_state;
+ struct tcp_ny_typec_state typec_state;
+ struct tcp_ny_swap_state swap_state;
+ struct tcp_ny_pd_state pd_state;
+ struct tcp_ny_mode_ctrl mode_ctrl;
+ struct tcp_ny_ama_dp_state ama_dp_state;
+ struct tcp_ny_ama_dp_attention ama_dp_attention;
+ struct tcp_ny_ama_dp_hpd_state ama_dp_hpd_state;
+ struct tcp_ny_uvdm uvdm_msg;
+ };
+};
+
+struct tcpc_device *tcpc_dev_get_by_name(const char *name);
+
+int register_tcp_dev_notifier(
+ struct tcpc_device *tcp_dev,
+ struct notifier_block *nb);
+int unregister_tcp_dev_notifier(
+ struct tcpc_device *tcp_dev,
+ struct notifier_block *nb);
+
+struct tcpc_device *notify_tcp_dev_ready(const char *name);
+
+/*
+ * Type-C Port Control I/F
+ */
+
+enum tcpm_error_list {
+ TCPM_SUCCESS = 0,
+ TCPM_ERROR_UNKNOWN = -1,
+ TCPM_ERROR_UNATTACHED = -2,
+ TCPM_ERROR_PARAMETER = -3,
+ TCPM_ERROR_PUT_EVENT = -4,
+};
+
+#define TCPM_PDO_MAX_SIZE 7
+
+struct tcpm_power_cap {
+ u8 cnt;
+ u32 pdos[TCPM_PDO_MAX_SIZE];
+};
+
+/* Inquire TCPM status */
+
+enum tcpc_cc_voltage_status {
+ TYPEC_CC_VOLT_OPEN = 0,
+ TYPEC_CC_VOLT_RA = 1,
+ TYPEC_CC_VOLT_RD = 2,
+
+ TYPEC_CC_VOLT_SNK_DFT = 5,
+ TYPEC_CC_VOLT_SNK_1_5 = 6,
+ TYPEC_CC_VOLT_SNK_3_0 = 7,
+
+ TYPEC_CC_DRP_TOGGLING = 15,
+};
+
+enum tcpm_vbus_level {
+#ifdef CONFIG_TCPC_VSAFE0V_DETECT
+ TCPC_VBUS_SAFE0V = 0,
+ TCPC_VBUS_INVALID,
+ TCPC_VBUS_VALID,
+#else
+ TCPC_VBUS_INVALID = 0,
+ TCPC_VBUS_VALID,
+#endif
+};
+
+enum typec_role_defination {
+ TYPEC_ROLE_UNKNOWN = 0,
+ TYPEC_ROLE_SNK,
+ TYPEC_ROLE_SRC,
+ TYPEC_ROLE_DRP,
+ TYPEC_ROLE_TRY_SRC,
+ TYPEC_ROLE_TRY_SNK,
+ TYPEC_ROLE_NR,
+};
+
+int tcpm_inquire_remote_cc(
+ struct tcpc_device *tcpc_dev,
+ u8 *cc1, u8 *cc2, bool from_ic);
+int tcpm_inquire_vbus_level(struct tcpc_device *tcpc_dev, bool from_ic);
+bool tcpm_inquire_cc_polarity(struct tcpc_device *tcpc_dev);
+u8 tcpm_inquire_typec_attach_state(struct tcpc_device *tcpc_dev);
+u8 tcpm_inquire_typec_role(struct tcpc_device *tcpc_dev);
+u8 tcpm_inquire_typec_local_rp(struct tcpc_device *tcpc_dev);
+
+int tcpm_typec_set_rp_level(struct tcpc_device *tcpc_dev, u8 level);
+
+int tcpm_typec_change_role(struct tcpc_device *tcpc_dev, u8 typec_role);
+
+#ifdef CONFIG_USB_POWER_DELIVERY
+
+bool tcpm_inquire_pd_connected(struct tcpc_device *tcpc_dev);
+
+bool tcpm_inquire_pd_prev_connected(struct tcpc_device *tcpc_dev);
+
+u8 tcpm_inquire_pd_data_role(struct tcpc_device *tcpc_dev);
+
+u8 tcpm_inquire_pd_power_role(struct tcpc_device *tcpc_dev);
+
+u8 tcpm_inquire_pd_vconn_role(struct tcpc_device *tcpc_dev);
+
+#endif /* CONFIG_USB_POWER_DELIVERY */
+
+/* Request TCPM to send PD Request */
+
+int tcpm_power_role_swap(struct tcpc_device *tcpc_dev);
+int tcpm_data_role_swap(struct tcpc_device *tcpc_dev);
+int tcpm_vconn_swap(struct tcpc_device *tcpc_dev);
+int tcpm_goto_min(struct tcpc_device *tcpc_dev);
+int tcpm_soft_reset(struct tcpc_device *tcpc_dev);
+int tcpm_hard_reset(struct tcpc_device *tcpc_dev);
+int tcpm_get_source_cap(
+ struct tcpc_device *tcpc_dev, struct tcpm_power_cap *cap);
+int tcpm_get_sink_cap(struct tcpc_device *tcpc_dev, struct tcpm_power_cap *cap);
+int tcpm_bist_cm2(struct tcpc_device *tcpc_dev);
+int tcpm_request(struct tcpc_device *tcpc_dev, int mv, int ma);
+int tcpm_error_recovery(struct tcpc_device *tcpc_dev);
+
+/* Request TCPM to send VDM */
+
+int tcpm_discover_cable(struct tcpc_device *tcpc_dev, u32 *vdos);
+
+int tcpm_vdm_request_id(struct tcpc_device *tcpc_dev, u8 *cnt, u8 *payload);
+
+/* Notify TCPM */
+
+int tcpm_notify_vbus_stable(struct tcpc_device *tcpc_dev);
+#endif /* TCPM_H_ */
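A hedged sketch of a client of the Type-C notify chain declared above; the port name "type_c_port0" and the callback body are assumptions, not taken from this patch:

	/* Illustrative notifier client for the Type-C port notify chain. */
	static int example_tcp_notifier_call(struct notifier_block *nb,
					     unsigned long event, void *data)
	{
		struct tcp_notify *noti = data;

		if (event == TCP_NOTIFY_TYPEC_STATE &&
		    noti->typec_state.new_state == TYPEC_ATTACHED_SRC)
			; /* enable the VBUS source path here */

		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_tcp_notifier_call,
	};

	static int example_register(void)
	{
		struct tcpc_device *tcpc = tcpc_dev_get_by_name("type_c_port0");

		if (!tcpc)
			return -ENODEV;

		return register_tcp_dev_notifier(tcpc, &example_nb);
	}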
diff --git a/include/linux/hisi_ion.h b/include/linux/hisi_ion.h
new file mode 100644
index 000000000000..0d7be75f795f
--- /dev/null
+++ b/include/linux/hisi_ion.h
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_HISI_ION_H
+#define _LINUX_HISI_ION_H
+
+#include <linux/ion.h>
+#include <linux/sizes.h>
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Ids are spaced on purpose so that new ids can be inserted in between
+ * (for possible fallbacks).
+ */
+
+enum ion_heap_ids {
+ INVALID_HEAP_ID = -1,
+ ION_SYSTEM_HEAP_ID = 0,
+ ION_SYSTEM_CONTIG_HEAP_ID = 1,
+ ION_GRALLOC_HEAP_ID = 2,
+ ION_DMA_HEAP_ID = 3,
+ ION_DMA_POOL_HEAP_ID = 4,
+ ION_CPU_DRAW_HEAP_ID = 5,
+ ION_CAMERA_HEAP_ID = 6,
+ ION_OVERLAY_HEAP_ID = 7,
+ ION_VCODEC_HEAP_ID = 8,
+ ION_ISP_HEAP_ID = 9,
+ ION_FB_HEAP_ID = 10,
+ ION_VPU_HEAP_ID = 11,
+ ION_JPU_HEAP_ID = 12,
+ HISI_ION_HEAP_IOMMU_ID = 13,
+ ION_MISC_HEAP_ID = 14,
+	ION_DRM_GRALLOC_HEAP_ID = 15,
+	ION_DRM_VCODEC_HEAP_ID = 16,
+	ION_TUI_HEAP_ID = 17,
+	ION_IRIS_HEAP_ID = 18,
+	ION_RESERV2_ID = 19,
+	ION_DRM_HEAP_ID = 20,
+ ION_HEAP_ID_RESERVED = 31, /* Bit reserved */
+};
+
+
+/**
+ * The ION_HEAP() macro should be used with the ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
+#define ION_8K_ALIGN(len) ALIGN(len, SZ_8K)
+#define IOMMU_PAGE_SIZE SZ_8K
+
+#define ION_VMALLOC_HEAP_NAME "vmalloc"
+#define ION_KMALLOC_HEAP_NAME "kmalloc"
+#define ION_GRALLOC_HEAP_NAME "gralloc"
+
+
+#define ION_SET_CACHED(__cache) (__cache | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache) (__cache & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)
+
+/* structure used to get the physical address of a contiguous heap buffer */
+struct ion_phys_data {
+ int fd_buffer;
+ unsigned int size;
+ union {
+ unsigned int phys;
+ unsigned int phys_l;
+ };
+ unsigned int phys_h;
+};
+
+struct ion_flag_data {
+ int shared_fd;
+ int flags;
+};
+
+struct ion_smart_pool_info_data {
+ int water_mark;
+};
+
+#define HISI_ION_NAME_LEN 16
+
+struct ion_heap_info_data {
+	char name[HISI_ION_NAME_LEN];
+	phys_addr_t heap_phy;
+	unsigned int heap_size;
+};
+
+struct ion_kern_va_data {
+	int handle_id;
+	unsigned int kern_va_h;
+	unsigned int kern_va_l;
+};
+
+struct ion_issupport_iommu_data {
+	int is_support_iommu;
+};
+
+struct ion_flush_data {
+ int fd;
+ void *vaddr;
+ unsigned int offset;
+ unsigned int length;
+};
+
+
+/* custom ioctl commands added for additional use */
+enum ION_HISI_CUSTOM_CMD {
+ ION_HISI_CUSTOM_PHYS,
+ ION_HISI_CLEAN_CACHES,
+ ION_HISI_INV_CACHES,
+ ION_HISI_CLEAN_INV_CACHES,
+ ION_HISI_CUSTOM_GET_KERN_VA,
+ ION_HISI_CUSTOM_FREE_KERN_VA,
+ ION_HISI_CUSTOM_ISSUPPORT_IOMMU,
+ ION_HISI_CUSTOM_GET_MEDIA_HEAP_MODE,
+ ION_HISI_CUSTOM_SET_FLAG,
+ ION_HISI_CUSTOM_SET_SMART_POOL_INFO,
+};
+
+enum ION_HISI_HEAP_MODE {
+	ION_CARVEROUT_MODE = 0,
+	ION_IOMMU_MODE = 1,
+};
+
+#define TINY_SYSTEM 0x0 /* tiny system version for chip test */
+#define FULL_SYSTEM 0x1 /* full system version */
+/**
+ * hisi_ion_client_create() - create an ion client
+ * @name:	the client name
+ *
+ * Return: the client handle
+ *
+ * This function should be called by in-kernel users. Before a user can
+ * access a buffer, it must obtain a client by calling this function.
+ */
+struct ion_client *hisi_ion_client_create(const char *name);
+int hisi_ion_get_heap_info(unsigned int id, struct ion_heap_info_data *data);
+int hisi_ion_get_media_mode(void);
+unsigned long long get_system_type(void);
+struct ion_device *get_ion_device(void);
+
+#define ION_IOC_HISI_MAGIC 'H'
+/**
+ * DOC: ION_IOC_FLUSH_ALL_CACHES - flush all L1 and L2 caches
+ *
+ * Flush all the caches of L1 and L2.
+ */
+#define ION_IOC_FLUSH_ALL_CACHES _IOWR(ION_IOC_HISI_MAGIC, 3, \
+ struct ion_flush_data)
+
+#ifdef CONFIG_ION
+extern unsigned long hisi_ion_total(void);
+#else
+static inline unsigned long hisi_ion_total(void)
+{
+ return 0;
+}
+#endif
+
+/* k3: added to calculate free memory */
+void hisi_ionsysinfo(struct sysinfo *si);
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags);
+int hisi_ion_memory_info(bool verbose);
+#endif
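A hedged kernel-side sketch of the helpers above; the heap choice, error conventions and log format are assumptions:

	/* Illustrative: create an ion client and query one heap's layout. */
	static int example_query_fb_heap(void)
	{
		struct ion_heap_info_data info;
		struct ion_client *client;
		int ret;

		client = hisi_ion_client_create("example");
		if (!client)	/* failure convention is assumed here */
			return -ENODEV;

		ret = hisi_ion_get_heap_info(ION_FB_HEAP_ID, &info);
		if (!ret)
			pr_info("heap %s: base %pa, size 0x%x\n",
				info.name, &info.heap_phy, info.heap_size);

		return ret;
	}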
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 436dc21318af..a4510152f5b5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,11 @@
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
+#ifdef CONFIG_HISI_IOMMU
+#define IOMMU_DEVICE (1 << 4)
+#define IOMMU_SEC (1 << 5)
+#define IOMMU_EXEC (1 << 6)
+#endif
struct iommu_ops;
struct iommu_group;
@@ -84,6 +89,7 @@ struct iommu_domain {
void *handler_token;
struct iommu_domain_geometry geometry;
void *iova_cookie;
+ void *priv;
};
enum iommu_cap {
@@ -117,6 +123,26 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/* metadata for iommu mapping */
+struct iommu_map_format {
+ unsigned long iova_start;
+ unsigned long iova_size;
+ unsigned long iommu_ptb_base;
+ unsigned long iommu_iova_base;
+ unsigned long header_size;
+ unsigned long phys_page_line;
+ unsigned long virt_page_line;
+ unsigned long is_tile;
+ unsigned long prot;
+};
+
+struct tile_format {
+ unsigned long header_size;
+ unsigned long is_tile;
+ unsigned long phys_page_line;
+ unsigned long virt_page_line;
+};
+
/**
* struct iommu_dm_region - descriptor for a direct mapped memory region
* @list: Linked list pointers
@@ -201,6 +227,14 @@ struct iommu_ops {
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#ifdef CONFIG_HISI_IOMMU
+ int (*map_tile)(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, size_t size, int prot,
+ struct tile_format *format);
+ size_t (*unmap_tile)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+#endif
+
unsigned long pgsize_bitmap;
};
@@ -272,7 +306,14 @@ struct device *iommu_device_create(struct device *parent, void *drvdata,
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
-
+#ifdef CONFIG_HISI_IOMMU
+int iommu_map_tile(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, size_t size, int prot,
+ struct tile_format *format);
+
+int iommu_unmap_tile(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+#endif
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 000000000000..2598e7a7e6f0
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,5 @@
+#ifndef _INCLUDE_LINUX_ION_H_
+#define _INCLUDE_LINUX_ION_H_
+#include "../../drivers/staging/android/uapi/ion.h"
+#include "../../drivers/staging/android/ion/ion.h"
+#endif /* _INCLUDE_LINUX_ION_H_ */
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index 587273e35acf..2580c08db7b1 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -38,4 +38,9 @@ struct hi6421_pmic {
struct regmap *regmap;
};
+enum hi6421_type {
+ HI6421 = 0,
+ HI6421_V530,
+};
+
#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/platform_data/nanohub.h b/include/linux/platform_data/nanohub.h
new file mode 100644
index 000000000000..f3050bdfb3b1
--- /dev/null
+++ b/include/linux/platform_data/nanohub.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_PLATFORM_DATA_NANOHUB_H
+#define __LINUX_PLATFORM_DATA_NANOHUB_H
+
+#include <linux/types.h>
+
+struct nanohub_flash_bank {
+ int bank;
+ u32 address;
+ size_t length;
+};
+
+struct nanohub_platform_data {
+ u32 wakeup_gpio;
+ u32 nreset_gpio;
+ u32 boot0_gpio;
+ u32 irq1_gpio;
+ u32 irq2_gpio;
+ u32 spi_cs_gpio;
+ u32 bl_addr;
+ u32 num_flash_banks;
+ struct nanohub_flash_bank *flash_banks;
+ u32 num_shared_flash_banks;
+ struct nanohub_flash_bank *shared_flash_banks;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_NANOHUB_H */
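A hedged sketch of board code filling in this platform data; every GPIO number and the flash-bank layout are placeholder values:

	/* Illustrative platform data for a nanohub device; values are placeholders. */
	static struct nanohub_flash_bank example_banks[] = {
		{ .bank = 0, .address = 0x08000000, .length = 0x4000 },
	};

	static struct nanohub_platform_data example_nanohub_pdata = {
		.wakeup_gpio	= 10,
		.nreset_gpio	= 11,
		.boot0_gpio	= 12,
		.irq1_gpio	= 13,
		.irq2_gpio	= 14,
		.spi_cs_gpio	= 15,
		.num_flash_banks = ARRAY_SIZE(example_banks),
		.flash_banks	= example_banks,
	};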
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
new file mode 100644
index 000000000000..ca33c5fd7b5c
--- /dev/null
+++ b/include/linux/serdev.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_SERDEV_H
+#define _LINUX_SERDEV_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/termios.h>
+
+struct serdev_controller;
+struct serdev_device;
+
+/*
+ * serdev device structures
+ */
+
+/**
+ * struct serdev_device_ops - Callback operations for a serdev device
+ * @receive_buf: Function called with data received from device.
+ * @write_wakeup: Function called when ready to transmit more data.
+ */
+struct serdev_device_ops {
+ int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct serdev_device *);
+};
+
+/**
+ * struct serdev_device - Basic representation of a serdev device
+ * @dev: Driver model representation of the device.
+ * @nr: Device number on serdev bus.
+ * @ctrl: serdev controller managing this device.
+ * @ops: Device operations.
+ * @write_comp: Completion used by serdev_device_write() internally
+ * @write_lock: Lock to serialize access when writing data
+ */
+struct serdev_device {
+ struct device dev;
+ int nr;
+ struct serdev_controller *ctrl;
+ const struct serdev_device_ops *ops;
+ struct completion write_comp;
+ struct mutex write_lock;
+};
+
+static inline struct serdev_device *to_serdev_device(struct device *d)
+{
+ return container_of(d, struct serdev_device, dev);
+}
+
+/**
+ * struct serdev_device_driver - serdev slave device driver
+ * @driver: serdev device drivers should initialize the name field of this
+ * structure.
+ * @probe: binds this driver to a serdev device.
+ * @remove: unbinds this driver from the serdev device.
+ */
+struct serdev_device_driver {
+ struct device_driver driver;
+ int (*probe)(struct serdev_device *);
+ void (*remove)(struct serdev_device *);
+};
+
+static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d)
+{
+ return container_of(d, struct serdev_device_driver, driver);
+}
+
+/*
+ * serdev controller structures
+ */
+struct serdev_controller_ops {
+ int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ void (*write_flush)(struct serdev_controller *);
+ int (*write_room)(struct serdev_controller *);
+ int (*open)(struct serdev_controller *);
+ void (*close)(struct serdev_controller *);
+ void (*set_flow_control)(struct serdev_controller *, bool);
+ unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
+ void (*wait_until_sent)(struct serdev_controller *, long);
+ int (*get_tiocm)(struct serdev_controller *);
+ int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
+};
+
+/**
+ * struct serdev_controller - interface to the serdev controller
+ * @dev: Driver model representation of the device.
+ * @nr: number identifier for this controller/bus.
+ * @serdev: Pointer to slave device for this controller.
+ * @ops: Controller operations.
+ */
+struct serdev_controller {
+ struct device dev;
+ unsigned int nr;
+ struct serdev_device *serdev;
+ const struct serdev_controller_ops *ops;
+};
+
+static inline struct serdev_controller *to_serdev_controller(struct device *d)
+{
+ return container_of(d, struct serdev_controller, dev);
+}
+
+static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev)
+{
+ return dev_get_drvdata(&serdev->dev);
+}
+
+static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data)
+{
+ dev_set_drvdata(&serdev->dev, data);
+}
+
+/**
+ * serdev_device_put() - decrement serdev device refcount
+ * @serdev: serdev device.
+ */
+static inline void serdev_device_put(struct serdev_device *serdev)
+{
+ if (serdev)
+ put_device(&serdev->dev);
+}
+
+static inline void serdev_device_set_client_ops(struct serdev_device *serdev,
+ const struct serdev_device_ops *ops)
+{
+ serdev->ops = ops;
+}
+
+static inline
+void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl)
+{
+ return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL;
+}
+
+static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl,
+ void *data)
+{
+ dev_set_drvdata(&ctrl->dev, data);
+}
+
+/**
+ * serdev_controller_put() - decrement controller refcount
+ * @ctrl: serdev controller.
+ */
+static inline void serdev_controller_put(struct serdev_controller *ctrl)
+{
+ if (ctrl)
+ put_device(&ctrl->dev);
+}
+
+struct serdev_device *serdev_device_alloc(struct serdev_controller *);
+int serdev_device_add(struct serdev_device *);
+void serdev_device_remove(struct serdev_device *);
+
+struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+int serdev_controller_add(struct serdev_controller *);
+void serdev_controller_remove(struct serdev_controller *);
+
+static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->write_wakeup)
+ return;
+
+ serdev->ops->write_wakeup(ctrl->serdev);
+}
+
+static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const unsigned char *data,
+ size_t count)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->receive_buf)
+ return -EINVAL;
+
+ return serdev->ops->receive_buf(ctrl->serdev, data, count);
+}
+
+#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
+
+int serdev_device_open(struct serdev_device *);
+void serdev_device_close(struct serdev_device *);
+unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
+void serdev_device_set_flow_control(struct serdev_device *, bool);
+int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_wait_until_sent(struct serdev_device *, long);
+int serdev_device_get_tiocm(struct serdev_device *);
+int serdev_device_set_tiocm(struct serdev_device *, int, int);
+void serdev_device_write_wakeup(struct serdev_device *);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
+void serdev_device_write_flush(struct serdev_device *);
+int serdev_device_write_room(struct serdev_device *);
+
+/*
+ * serdev device driver functions
+ */
+int __serdev_device_driver_register(struct serdev_device_driver *, struct module *);
+#define serdev_device_driver_register(sdrv) \
+ __serdev_device_driver_register(sdrv, THIS_MODULE)
+
+/**
+ * serdev_device_driver_unregister() - unregister a serdev client driver
+ * @sdrv: the driver to unregister
+ */
+static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
+#define module_serdev_device_driver(__serdev_device_driver) \
+ module_driver(__serdev_device_driver, serdev_device_driver_register, \
+ serdev_device_driver_unregister)
+
+#else
+
+static inline int serdev_device_open(struct serdev_device *sdev)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_close(struct serdev_device *sdev) {}
+static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate)
+{
+ return 0;
+}
+static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *buf,
+ size_t count)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
+static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
+ size_t count, unsigned long timeout)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
+static inline int serdev_device_write_room(struct serdev_device *sdev)
+{
+ return 0;
+}
+
+#define serdev_device_driver_register(x)
+#define serdev_device_driver_unregister(x)
+
+#endif /* CONFIG_SERIAL_DEV_BUS */
+
+/*
+ * serdev hooks into TTY core
+ */
+struct tty_port;
+struct tty_driver;
+
+#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
+struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx);
+void serdev_tty_port_unregister(struct tty_port *port);
+#else
+static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void serdev_tty_port_unregister(struct tty_port *port) {}
+#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+
+#endif /*_LINUX_SERDEV_H */
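A hedged skeleton of a serdev client driver written against the interface above; the driver name, baud rate and echo behaviour are illustrative:

	/* Illustrative serdev client: opens the port and echoes received bytes. */
	static int example_receive_buf(struct serdev_device *serdev,
				       const unsigned char *data, size_t count)
	{
		serdev_device_write_buf(serdev, data, count);
		return count;
	}

	static const struct serdev_device_ops example_serdev_ops = {
		.receive_buf = example_receive_buf,
	};

	static int example_probe(struct serdev_device *serdev)
	{
		int ret;

		serdev_device_set_client_ops(serdev, &example_serdev_ops);

		ret = serdev_device_open(serdev);
		if (ret)
			return ret;

		serdev_device_set_baudrate(serdev, 115200);
		serdev_device_set_flow_control(serdev, false);
		return 0;
	}

	static void example_remove(struct serdev_device *serdev)
	{
		serdev_device_close(serdev);
	}

	static struct serdev_device_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name = "example-serdev",
		},
	};

	module_serdev_device_driver(example_driver);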
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index f2293028ab9d..81154b6d8985 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,6 +86,7 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
+extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h
new file mode 100644
index 000000000000..32ee08e499c3
--- /dev/null
+++ b/include/linux/trusty/sm_err.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __LINUX_TRUSTY_SM_ERR_H
+#define __LINUX_TRUSTY_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0)) */
+#define SM_ERR_INVALID_PARAMETERS -2
+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART -4 /* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
+#define SM_ERR_NOT_SUPPORTED -8
+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT -10
+#define SM_ERR_PANIC -11 /* Secure OS crashed */
+#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */
+#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */
+#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */
+#define SM_ERR_NOP_DONE -15 /* CPU idle after SMC_SC_NOP (not an error) */
+
+#endif
diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h
new file mode 100644
index 000000000000..ca66be546e71
--- /dev/null
+++ b/include/linux/trusty/smcall.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2013-2014 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __LINUX_TRUSTY_SMCALL_H
+#define __LINUX_TRUSTY_SMCALL_H
+
+#define SMC_NUM_ENTITIES 64
+#define SMC_NUM_ARGS 4
+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1) << 31) | \
+ (((smc64) & 0x1) << 30) | \
+ (((entity) & 0x3F) << 24) | \
+ ((fn) & 0xFFFF) \
+ )
+
+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0)
+#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0)
+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1)
+#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1)
+
+#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */
+#define SMC_ENTITY_CPU 1 /* CPU Service calls */
+#define SMC_ENTITY_SIP 2 /* SIP Service calls */
+#define SMC_ENTITY_OEM 3 /* OEM Service calls */
+#define SMC_ENTITY_STD 4 /* Standard Service calls */
+#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */
+#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */
+#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */
+#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */
+#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, SC = Standard call */
+#define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+
+/**
+ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq
+ *
+ * No arguments, no return value.
+ *
+ * Re-enter trusty after returning to ns to process an fiq. Must be called iff
+ * trusty returns SM_ERR_FIQ_INTERRUPTED.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later.
+ */
+#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
+
+/**
+ * SMC_SC_NOP - Enter trusty to run pending work.
+ *
+ * No arguments.
+ *
+ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE.
+ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later.
+ */
+#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_SC_NS_RETURN SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
+#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4)
+
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
+#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6)
+
+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10)
+
+/**
+ * SMC_FC_API_VERSION - Find and select supported API version.
+ *
+ * @r1: Version supported by client.
+ *
+ * Returns version supported by trusty.
+ *
+ * If multiple versions are supported, the client should start by calling
+ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then
+ * return a version it supports. If the client does not support the version
+ * returned by trusty and the version returned is less than the version
+ * requested, repeat the call with the largest supported version less than the
+ * last returned version.
+ *
+ * This call must be made before any calls that are affected by the API version.
+ */
+#define TRUSTY_API_VERSION_RESTART_FIQ (1)
+#define TRUSTY_API_VERSION_SMP (2)
+#define TRUSTY_API_VERSION_SMP_NOP (3)
+#define TRUSTY_API_VERSION_CURRENT (3)
+#define SMC_FC_API_VERSION SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 11)
+
+#define SMC_FC_FIQ_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 12)
+
+/* TRUSTED_OS entity calls */
+#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
+#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
+#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+
+#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
+#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+#define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25)
+
+#endif /* __LINUX_TRUSTY_SMCALL_H */
diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h
new file mode 100644
index 000000000000..742c09e9e6d7
--- /dev/null
+++ b/include/linux/trusty/trusty.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_TRUSTY_TRUSTY_H
+#define __LINUX_TRUSTY_TRUSTY_H
+
+#include <linux/kernel.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/device.h>
+#include <linux/pagemap.h>
+
+
+#ifdef CONFIG_TRUSTY
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+#ifdef CONFIG_64BIT
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2);
+#endif
+#else
+static inline s32 trusty_std_call32(struct device *dev, u32 smcnr,
+ u32 a0, u32 a1, u32 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr,
+ u32 a0, u32 a1, u32 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+#ifdef CONFIG_64BIT
+static inline s64 trusty_fast_call64(struct device *dev,
+ u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+#endif
+#endif
+
+struct notifier_block;
+enum {
+ TRUSTY_CALL_PREPARE,
+ TRUSTY_CALL_RETURNED,
+};
+int trusty_call_notifier_register(struct device *dev,
+ struct notifier_block *n);
+int trusty_call_notifier_unregister(struct device *dev,
+ struct notifier_block *n);
+const char *trusty_version_str_get(struct device *dev);
+u32 trusty_get_api_version(struct device *dev);
+
+struct ns_mem_page_info {
+ uint64_t attr;
+};
+
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+ struct page *page, pgprot_t pgprot);
+
+int trusty_call32_mem_buf(struct device *dev, u32 smcnr,
+ struct page *page, u32 size,
+ pgprot_t pgprot);
+
+struct trusty_nop {
+ struct list_head node;
+ u32 args[3];
+};
+
+static inline void trusty_nop_init(struct trusty_nop *nop,
+ u32 arg0, u32 arg1, u32 arg2) {
+ INIT_LIST_HEAD(&nop->node);
+ nop->args[0] = arg0;
+ nop->args[1] = arg1;
+ nop->args[2] = arg2;
+}
+
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop);
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop);
+
+#endif
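The sm_err.h comments above describe when a call must be repeated or restarted; below is a hedged sketch of a wrapper that follows those rules, assuming smcall.h is also included. Note that the in-tree trusty driver may already perform this handling inside trusty_std_call32() itself, so this is illustration only:

	/* Illustrative: issue a standard call and follow the documented retry rules. */
	static s32 example_std_call(struct device *dev, u32 smcnr,
				    u32 a0, u32 a1, u32 a2)
	{
		s32 ret = trusty_std_call32(dev, smcnr, a0, a1, a2);

		while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_BUSY) {
			if (ret == SM_ERR_BUSY)
				/* temporarily busy: a real caller would back off,
				 * then repeat with the original arguments
				 */
				ret = trusty_std_call32(dev, smcnr, a0, a1, a2);
			else
				/* interrupted: resume with the restart SMC */
				ret = trusty_std_call32(dev, SMC_SC_RESTART_LAST,
							0, 0, 0);
		}

		return ret;
	}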
diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h
new file mode 100644
index 000000000000..4ca15938a854
--- /dev/null
+++ b/include/linux/trusty/trusty_ipc.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H
+#define __LINUX_TRUSTY_TRUSTY_IPC_H
+
+struct tipc_chan;
+
+struct tipc_msg_buf {
+ void *buf_va;
+ phys_addr_t buf_pa;
+ size_t buf_sz;
+ size_t wpos;
+ size_t rpos;
+ struct list_head node;
+};
+
+enum tipc_chan_event {
+ TIPC_CHANNEL_CONNECTED = 1,
+ TIPC_CHANNEL_DISCONNECTED,
+ TIPC_CHANNEL_SHUTDOWN,
+};
+
+struct tipc_chan_ops {
+ void (*handle_event)(void *cb_arg, int event);
+ struct tipc_msg_buf *(*handle_msg)(void *cb_arg,
+ struct tipc_msg_buf *mb);
+};
+
+struct tipc_chan *tipc_create_channel(struct device *dev,
+ const struct tipc_chan_ops *ops,
+ void *cb_arg);
+
+int tipc_chan_connect(struct tipc_chan *chan, const char *port);
+
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+int tipc_chan_shutdown(struct tipc_chan *chan);
+
+void tipc_chan_destroy(struct tipc_chan *chan);
+
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan);
+
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+struct tipc_msg_buf *
+tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout);
+
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+static inline size_t mb_avail_space(struct tipc_msg_buf *mb)
+{
+ return mb->buf_sz - mb->wpos;
+}
+
+static inline size_t mb_avail_data(struct tipc_msg_buf *mb)
+{
+ return mb->wpos - mb->rpos;
+}
+
+static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len)
+{
+ void *pos = (u8 *)mb->buf_va + mb->wpos;
+ BUG_ON(mb->wpos + len > mb->buf_sz);
+ mb->wpos += len;
+ return pos;
+}
+
+static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len)
+{
+ void *pos = (u8 *)mb->buf_va + mb->rpos;
+ BUG_ON(mb->rpos + len > mb->wpos);
+ mb->rpos += len;
+ return pos;
+}
+
+#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */
+
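A hedged sketch of a trusty IPC client built on the channel API above; the port name, timeout and error conventions are assumptions, and a real client would wait for TIPC_CHANNEL_CONNECTED before sending:

	/* Illustrative trusty IPC client: connect to a port and send one message. */
	static void example_handle_event(void *cb_arg, int event)
	{
		if (event == TIPC_CHANNEL_DISCONNECTED)
			; /* a real client would tear down its state here */
	}

	static struct tipc_msg_buf *example_handle_msg(void *cb_arg,
						       struct tipc_msg_buf *mb)
	{
		/* consume mb_avail_data(mb) bytes starting at the read position */
		return mb;	/* hand the buffer back for reuse */
	}

	static const struct tipc_chan_ops example_chan_ops = {
		.handle_event	= example_handle_event,
		.handle_msg	= example_handle_msg,
	};

	static int example_send(struct device *dev, const void *data, size_t len)
	{
		struct tipc_chan *chan;
		struct tipc_msg_buf *txbuf;
		int ret;

		chan = tipc_create_channel(dev, &example_chan_ops, NULL);
		if (IS_ERR_OR_NULL(chan))	/* failure convention is assumed */
			return -ENOMEM;

		ret = tipc_chan_connect(chan, "com.example.echo");
		if (ret)
			goto destroy;

		txbuf = tipc_chan_get_txbuf_timeout(chan, 1000);
		if (IS_ERR_OR_NULL(txbuf)) {
			ret = -ETIMEDOUT;
			goto shutdown;
		}

		if (mb_avail_space(txbuf) < len) {
			tipc_chan_put_txbuf(chan, txbuf);
			ret = -EMSGSIZE;
			goto shutdown;
		}

		memcpy(mb_put_data(txbuf, len), data, len);
		ret = tipc_chan_queue_msg(chan, txbuf);

	shutdown:
		tipc_chan_shutdown(chan);
	destroy:
		tipc_chan_destroy(chan);
		return ret;
	}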
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 40144f382516..1017e904c0a3 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -217,12 +217,18 @@ struct tty_port_operations {
/* Called on the final put of a port */
void (*destruct)(struct tty_port *port);
};
-
+
+struct tty_port_client_operations {
+ int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct tty_port *port);
+};
+
struct tty_port {
struct tty_bufhead buf; /* Locked internally */
struct tty_struct *tty; /* Back pointer */
struct tty_struct *itty; /* internal back ptr */
const struct tty_port_operations *ops; /* Port operations */
+ const struct tty_port_client_operations *client_ops; /* Port client operations */
spinlock_t lock; /* Lock protecting tty field */
int blocked_open; /* Waiting to open */
int count; /* Usage count */
@@ -241,6 +247,7 @@ struct tty_port {
based drain is needed else
set to size of fifo */
struct kref kref; /* Ref counter */
+ void *client_data;
};
/* tty_port::iflags bits -- use atomic bit ops */
@@ -528,6 +535,7 @@ extern int tty_alloc_file(struct file *file);
extern void tty_add_file(struct tty_struct *tty, struct file *file);
extern void tty_free_file(struct file *file);
extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
@@ -656,7 +664,7 @@ extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count);
/* n_tty.c */
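The new tty_port_client_operations hook lets a kernel-side port consumer receive data and write-wakeup callbacks directly from the tty_port, without installing a line discipline. A minimal sketch of such a client, with hypothetical names and the bodies left as comments:

static int my_receive_buf(struct tty_port *port, const unsigned char *cp,
			  const unsigned char *fp, size_t count)
{
	/* consume count bytes from cp[]; fp[] carries per-byte flags */
	return count;
}

static void my_write_wakeup(struct tty_port *port)
{
	/* room in the driver again: resume any pending transmit */
}

static const struct tty_port_client_operations my_client_ops = {
	.receive_buf	= my_receive_buf,
	.write_wakeup	= my_write_wakeup,
};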
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10e8cfd..f3f5d8a396e4 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -354,6 +354,7 @@ struct usb_devmap {
*/
struct usb_bus {
struct device *controller; /* host/master side hardware */
+ struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
u8 uses_dma; /* Does the host controller use DMA? */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 492034126876..3a5d591f45e7 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -437,6 +437,9 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_interface *new_alt);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name);
extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 554671c81f4a..5ee5913c4408 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -383,6 +383,11 @@ struct hci_dev {
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+#ifdef CONFIG_BT_LEDS
+ struct led_trigger *tx_led, *rx_led;
+ char tx_led_name[32], rx_led_name[32];
+#endif
+
__s8 adv_tx_power;
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u8 adv_data_len;
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 3228d582234a..305bd8413d73 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,6 +39,7 @@
#define VIRTIO_ID_9P 9 /* 9p virtio console */
#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 06c31b9a68b0..a9f293770019 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,7 +27,7 @@ menuconfig BT
L2CAP (Logical Link Control and Adaptation Protocol)
SMP (Security Manager Protocol) on LE (Low Energy) links
HCI Device drivers (Interface to the hardware)
- RFCOMM Module (RFCOMM Protocol)
+ RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
HIDP Module (Human Interface Device Protocol)
@@ -45,6 +45,15 @@ config BT_BREDR
depends on BT
default y
+config BT_LEDS
+ bool "Enable LED triggers"
+ depends on BT
+ depends on LEDS_CLASS
+ select LEDS_TRIGGERS
+ ---help---
+	  This option enables LED triggers for Bluetooth
+	  packet receive and transmit activity.
+
source "net/bluetooth/rfcomm/Kconfig"
source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index b3ff12eb9b6d..77a8d2c09223 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
obj-$(CONFIG_BT_6LOWPAN) += bluetooth_6lowpan.o
-
bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
@@ -21,4 +20,6 @@ bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
+bluetooth-$(CONFIG_BT_LEDS) += led.o
+
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3ac89e9ace71..e1f94a43a90a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -39,6 +39,7 @@
#include "hci_request.h"
#include "hci_debugfs.h"
+#include "led.h"
#include "smp.h"
#include "leds.h"
@@ -3086,6 +3087,9 @@ int hci_register_dev(struct hci_dev *hdev)
if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
hci_dev_set_flag(hdev, HCI_RFKILLED);
+ bluetooth_led_names(hdev);
+ bluetooth_led_init(hdev);
+
hci_dev_set_flag(hdev, HCI_SETUP);
hci_dev_set_flag(hdev, HCI_AUTO_OFF);
@@ -3156,6 +3160,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_sock_dev_event(hdev, HCI_DEV_UNREG);
+ bluetooth_led_exit(hdev);
+
if (hdev->rfkill) {
rfkill_unregister(hdev->rfkill);
rfkill_destroy(hdev->rfkill);
@@ -3249,6 +3255,8 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb_queue_tail(&hdev->rx_q, skb);
queue_work(hdev->workqueue, &hdev->rx_work);
+ bluetooth_led_rx(hdev);
+
return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
@@ -3348,6 +3356,8 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_ERR("%s sending frame failed (%d)", hdev->name, err);
kfree_skb(skb);
}
+
+ bluetooth_led_tx(hdev);
}
/* Send HCI command */
diff --git a/net/bluetooth/led.c b/net/bluetooth/led.c
new file mode 100644
index 000000000000..8dd3516f6ae7
--- /dev/null
+++ b/net/bluetooth/led.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015, Guodong Xu <guodong.xu@linaro.org>
+ * Copyright 2006, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include "led.h"
+
+#define BLUETOOTH_BLINK_DELAY 50 /* ms */
+
+void bluetooth_led_rx(struct hci_dev *hdev)
+{
+ unsigned long led_delay = BLUETOOTH_BLINK_DELAY;
+ if (unlikely(!hdev->rx_led))
+ return;
+ led_trigger_blink_oneshot(hdev->rx_led, &led_delay, &led_delay, 0);
+}
+
+void bluetooth_led_tx(struct hci_dev *hdev)
+{
+ unsigned long led_delay = BLUETOOTH_BLINK_DELAY;
+ if (unlikely(!hdev->tx_led))
+ return;
+ led_trigger_blink_oneshot(hdev->tx_led, &led_delay, &led_delay, 0);
+}
+
+void bluetooth_led_names(struct hci_dev *hdev)
+{
+ snprintf(hdev->rx_led_name, sizeof(hdev->rx_led_name),
+ "%srx", hdev->name);
+ snprintf(hdev->tx_led_name, sizeof(hdev->tx_led_name),
+ "%stx", hdev->name);
+}
+
+void bluetooth_led_init(struct hci_dev *hdev)
+{
+ hdev->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
+ if (hdev->rx_led) {
+ hdev->rx_led->name = hdev->rx_led_name;
+ if (led_trigger_register(hdev->rx_led)) {
+ kfree(hdev->rx_led);
+ hdev->rx_led = NULL;
+ }
+ }
+
+ hdev->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
+ if (hdev->tx_led) {
+ hdev->tx_led->name = hdev->tx_led_name;
+ if (led_trigger_register(hdev->tx_led)) {
+ kfree(hdev->tx_led);
+ hdev->tx_led = NULL;
+ }
+ }
+}
+
+void bluetooth_led_exit(struct hci_dev *hdev)
+{
+ if (hdev->tx_led) {
+ led_trigger_unregister(hdev->tx_led);
+ kfree(hdev->tx_led);
+ }
+ if (hdev->rx_led) {
+ led_trigger_unregister(hdev->rx_led);
+ kfree(hdev->rx_led);
+ }
+}
diff --git a/net/bluetooth/led.h b/net/bluetooth/led.h
new file mode 100644
index 000000000000..766a211203ff
--- /dev/null
+++ b/net/bluetooth/led.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015, Guodong Xu <guodong.xu@linaro.org>
+ * Copyright 2006, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/leds.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#ifdef CONFIG_BT_LEDS
+void bluetooth_led_rx(struct hci_dev *hdev);
+void bluetooth_led_tx(struct hci_dev *hdev);
+void bluetooth_led_names(struct hci_dev *hdev);
+void bluetooth_led_init(struct hci_dev *hdev);
+void bluetooth_led_exit(struct hci_dev *hdev);
+#else
+static inline void bluetooth_led_rx(struct hci_dev *hdev)
+{
+}
+static inline void bluetooth_led_tx(struct hci_dev *hdev)
+{
+}
+static inline void bluetooth_led_names(struct hci_dev *hdev)
+{
+}
+static inline void bluetooth_led_init(struct hci_dev *hdev)
+{
+}
+static inline void bluetooth_led_exit(struct hci_dev *hdev)
+{
+}
+#endif
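bluetooth_led_names() above derives the trigger names by appending "rx"/"tx" to the hci device name, so a controller registered as hci0 exposes "hci0rx" and "hci0tx". A hedged board-code sketch binding a GPIO LED to the RX trigger; the LED name and GPIO number are assumptions, not part of the patch:

#include <linux/leds.h>

static const struct gpio_led bt_activity_led = {
	.name		 = "bt:blue:activity",
	.default_trigger = "hci0rx",	/* trigger registered by led.c above */
	.gpio		 = 42,		/* assumed GPIO line */
};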
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 182d92efc7c8..9df9658b552b 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -47,6 +47,7 @@ source "sound/soc/cirrus/Kconfig"
source "sound/soc/davinci/Kconfig"
source "sound/soc/dwc/Kconfig"
source "sound/soc/fsl/Kconfig"
+source "sound/soc/hisilicon/Kconfig"
source "sound/soc/jz4740/Kconfig"
source "sound/soc/nuc900/Kconfig"
source "sound/soc/omap/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 9a30f21d16ee..2f6aabb8b4c3 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SND_SOC) += cirrus/
obj-$(CONFIG_SND_SOC) += davinci/
obj-$(CONFIG_SND_SOC) += dwc/
obj-$(CONFIG_SND_SOC) += fsl/
+obj-$(CONFIG_SND_SOC) += hisilicon/
obj-$(CONFIG_SND_SOC) += jz4740/
obj-$(CONFIG_SND_SOC) += img/
obj-$(CONFIG_SND_SOC) += intel/
diff --git a/sound/soc/hisilicon/Kconfig b/sound/soc/hisilicon/Kconfig
new file mode 100644
index 000000000000..e5547797ee35
--- /dev/null
+++ b/sound/soc/hisilicon/Kconfig
@@ -0,0 +1,11 @@
+config SND_I2S_HI6210_I2S
+ tristate "Hisilicon Hi6210 I2S controller"
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+	  Say Y or M to add support for the I2S controller found on
+	  Hisilicon Hi6210-family SoCs. This driver only handles the
+	  S2 (Bluetooth) interface.
+
+config SND_I2S_HISI_I2S
+ tristate "Hisilicon 960 I2S controller"
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+	  Say Y or M to add support for the I2S/SIO audio controller
+	  found on Hisilicon 960-series SoCs (e.g. HiKey960).
diff --git a/sound/soc/hisilicon/Makefile b/sound/soc/hisilicon/Makefile
new file mode 100644
index 000000000000..30100de7491b
--- /dev/null
+++ b/sound/soc/hisilicon/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SND_I2S_HI6210_I2S) += hi6210-i2s.o
+obj-$(CONFIG_SND_I2S_HISI_I2S) += hisi-i2s.o
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
new file mode 100644
index 000000000000..a05888ac6672
--- /dev/null
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -0,0 +1,628 @@
+/*
+ * linux/sound/soc/hisilicon/hi6210-i2s.c - I2S IP driver
+ *
+ * Copyright (C) 2015 Linaro, Ltd
+ * Author: Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver only handles the S2 interface (BT)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset-controller.h>
+
+#include "hi6210-i2s.h"
+
+struct hi6210_i2s {
+ struct device *dev;
+ struct reset_control *rc;
+ struct clk *clk[8];
+ int clocks;
+ struct snd_soc_dai_driver dai;
+ void __iomem *base;
+ struct regmap *sysctrl;
+ phys_addr_t base_phys;
+ struct snd_dmaengine_dai_dma_data dma_data[2];
+ int clk_rate;
+ spinlock_t lock;
+ int rate;
+ int format;
+ u8 bits;
+ u8 channels;
+ u8 id;
+ u8 channel_length;
+ u8 use;
+ u32 master:1;
+ u32 status:1;
+};
+
+#define SC_PERIPH_CLKEN1 0x210
+#define SC_PERIPH_CLKDIS1 0x214
+
+#define SC_PERIPH_CLKEN3 0x230
+#define SC_PERIPH_CLKDIS3 0x234
+
+#define SC_PERIPH_CLKEN12 0x270
+#define SC_PERIPH_CLKDIS12 0x274
+
+#define SC_PERIPH_RSTEN1 0x310
+#define SC_PERIPH_RSTDIS1 0x314
+#define SC_PERIPH_RSTSTAT1 0x318
+
+#define SC_PERIPH_RSTEN2 0x320
+#define SC_PERIPH_RSTDIS2 0x324
+#define SC_PERIPH_RSTSTAT2 0x328
+
+#define SOC_PMCTRL_BBPPLLALIAS 0x48
+
+enum {
+ CLK_DACODEC,
+ CLK_I2S_BASE,
+};
+
+static inline void hi6210_write_reg(struct hi6210_i2s *i2s, int reg, u32 val)
+{
+ writel(val, i2s->base + reg);
+}
+
+static inline u32 hi6210_read_reg(struct hi6210_i2s *i2s, int reg)
+{
+ return readl(i2s->base + reg);
+}
+
+int hi6210_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ int ret, n;
+ u32 val;
+
+ /* deassert reset on ABB */
+ regmap_read(i2s->sysctrl, SC_PERIPH_RSTSTAT2, &val);
+ if (val & BIT(4))
+ regmap_write(i2s->sysctrl, SC_PERIPH_RSTDIS2, BIT(4));
+
+ for (n = 0; n < i2s->clocks; n++) {
+ ret = clk_prepare_enable(i2s->clk[n]);
+ if (ret) {
+ while (n--)
+ clk_disable_unprepare(i2s->clk[n]);
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
+ if (ret) {
+ dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* enable clock before frequency division */
+ regmap_write(i2s->sysctrl, SC_PERIPH_CLKEN12, BIT(9));
+
+ /* enable the codec working clock (the "codec bus clock") */
+ regmap_write(i2s->sysctrl, SC_PERIPH_CLKEN1, BIT(5));
+
+ /* deassert reset on codec / interface clock / working clock */
+ regmap_write(i2s->sysctrl, SC_PERIPH_RSTEN1, BIT(5));
+ regmap_write(i2s->sysctrl, SC_PERIPH_RSTDIS1, BIT(5));
+
+ /* not interested in i2s irqs */
+ val = hi6210_read_reg(i2s, HII2S_CODEC_IRQ_MASK);
+ val |= 0x3f;
+ hi6210_write_reg(i2s, HII2S_CODEC_IRQ_MASK, val);
+
+
+ /* reset the stereo downlink fifo */
+ val = hi6210_read_reg(i2s, HII2S_APB_AFIFO_CFG_1);
+ val |= (BIT(5) | BIT(4));
+ hi6210_write_reg(i2s, HII2S_APB_AFIFO_CFG_1, val);
+
+ val = hi6210_read_reg(i2s, HII2S_APB_AFIFO_CFG_1);
+ val &= ~(BIT(5) | BIT(4));
+ hi6210_write_reg(i2s, HII2S_APB_AFIFO_CFG_1, val);
+
+
+ val = hi6210_read_reg(i2s, HII2S_SW_RST_N);
+ val &= ~(HII2S_SW_RST_N__ST_DL_WORDLEN_MASK <<
+ HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT);
+ val |= (HII2S_BITS_16 << HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT);
+ hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
+
+ val = hi6210_read_reg(i2s, HII2S_MISC_CFG);
+ /* mux 11/12 = APB not i2s */
+ val &= ~HII2S_MISC_CFG__ST_DL_TEST_SEL;
+ /* BT R ch 0 = mixer output of DACR ch */
+ val &= ~HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL;
+ val &= ~HII2S_MISC_CFG__S2_DOUT_TEST_SEL;
+
+ val |= HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL;
+ /* BT L ch = 1 = mux 7 = "mixer output of DACL" */
+ val |= HII2S_MISC_CFG__S2_DOUT_TEST_SEL;
+ hi6210_write_reg(i2s, HII2S_MISC_CFG, val);
+
+ val = hi6210_read_reg(i2s, HII2S_SW_RST_N);
+ val |= HII2S_SW_RST_N__SW_RST_N;
+ hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
+
+ return 0;
+}
+
+void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ int n;
+
+ for (n = 0; n < i2s->clocks; n++)
+ clk_disable_unprepare(i2s->clk[n]);
+
+ regmap_write(i2s->sysctrl, SC_PERIPH_RSTEN1, BIT(5));
+}
+
+static void hi6210_i2s_txctrl(struct snd_soc_dai *cpu_dai, int on)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ u32 val;
+
+ spin_lock(&i2s->lock);
+ if (on) {
+ /* enable S2 TX */
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val |= HII2S_I2S_CFG__S2_IF_TX_EN;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ } else {
+ /* disable S2 TX */
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~HII2S_I2S_CFG__S2_IF_TX_EN;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ }
+ spin_unlock(&i2s->lock);
+}
+
+static void hi6210_i2s_rxctrl(struct snd_soc_dai *cpu_dai, int on)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ u32 val;
+
+ spin_lock(&i2s->lock);
+ if (on) {
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val |= HII2S_I2S_CFG__S2_IF_RX_EN;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ } else {
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~HII2S_I2S_CFG__S2_IF_RX_EN;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ }
+ spin_unlock(&i2s->lock);
+}
+
+static int hi6210_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ /*
+ * We don't actually set the hardware until the hw_params
+ * call, but we need to validate the user input here.
+ */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->format = fmt;
+ i2s->master = (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) ==
+ SND_SOC_DAIFMT_CBS_CFS;
+
+ return 0;
+}
+
+static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ u32 bits = 0, rate = 0, signed_data = 0, fmt = 0;
+ u32 val;
+ struct snd_dmaengine_dai_dma_data *dma_data;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_U16_LE:
+ signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
+ /* fallthru */
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bits = HII2S_BITS_16;
+ break;
+ case SNDRV_PCM_FORMAT_U24_LE:
+ signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
+ /* fallthru */
+ case SNDRV_PCM_FORMAT_S24_LE:
+ bits = HII2S_BITS_24;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Bad format\n");
+ return -EINVAL;
+ }
+
+
+ switch (params_rate(params)) {
+ case 8000:
+ rate = HII2S_FS_RATE_8KHZ;
+ break;
+ case 16000:
+ rate = HII2S_FS_RATE_16KHZ;
+ break;
+ case 32000:
+ rate = HII2S_FS_RATE_32KHZ;
+ break;
+ case 48000:
+ rate = HII2S_FS_RATE_48KHZ;
+ break;
+ case 96000:
+ rate = HII2S_FS_RATE_96KHZ;
+ break;
+ case 192000:
+ rate = HII2S_FS_RATE_192KHZ;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Bad rate: %d\n", params_rate(params));
+ return -EINVAL;
+ }
+
+ if (!(params_channels(params))) {
+ dev_err(cpu_dai->dev, "Bad channels\n");
+ return -EINVAL;
+ }
+
+ dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+ switch (bits) {
+ case HII2S_BITS_24:
+ i2s->bits = 32;
+ dma_data->addr_width = 3;
+ break;
+ default:
+ i2s->bits = 16;
+ dma_data->addr_width = 2;
+ }
+ i2s->rate = params_rate(params);
+ i2s->channels = params_channels(params);
+ i2s->channel_length = i2s->channels * i2s->bits;
+
+ val = hi6210_read_reg(i2s, HII2S_ST_DL_FIFO_TH_CFG);
+ val &= ~((HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_MASK <<
+ HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT) |
+ (HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_MASK <<
+ HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT) |
+ (HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_MASK <<
+ HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT) |
+ (HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_MASK <<
+ HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT));
+ val |= ((16 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT) |
+ (30 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT) |
+ (16 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT) |
+ (30 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT));
+ hi6210_write_reg(i2s, HII2S_ST_DL_FIFO_TH_CFG, val);
+
+
+ val = hi6210_read_reg(i2s, HII2S_IF_CLK_EN_CFG);
+ val |= (BIT(19) | BIT(18) | BIT(17) |
+ HII2S_IF_CLK_EN_CFG__S2_IF_CLK_EN |
+ HII2S_IF_CLK_EN_CFG__S2_OL_MIXER_EN |
+ HII2S_IF_CLK_EN_CFG__S2_OL_SRC_EN |
+ HII2S_IF_CLK_EN_CFG__ST_DL_R_EN |
+ HII2S_IF_CLK_EN_CFG__ST_DL_L_EN);
+ hi6210_write_reg(i2s, HII2S_IF_CLK_EN_CFG, val);
+
+
+ val = hi6210_read_reg(i2s, HII2S_DIG_FILTER_CLK_EN_CFG);
+ val &= ~(HII2S_DIG_FILTER_CLK_EN_CFG__DACR_SDM_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACR_HBF2I_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACR_AGC_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACL_SDM_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACL_HBF2I_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACL_AGC_EN);
+ val |= (HII2S_DIG_FILTER_CLK_EN_CFG__DACR_MIXER_EN |
+ HII2S_DIG_FILTER_CLK_EN_CFG__DACL_MIXER_EN);
+ hi6210_write_reg(i2s, HII2S_DIG_FILTER_CLK_EN_CFG, val);
+
+
+ val = hi6210_read_reg(i2s, HII2S_DIG_FILTER_MODULE_CFG);
+ val &= ~(HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN2_MUTE |
+ HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN2_MUTE);
+ hi6210_write_reg(i2s, HII2S_DIG_FILTER_MODULE_CFG, val);
+
+ val = hi6210_read_reg(i2s, HII2S_MUX_TOP_MODULE_CFG);
+ val &= ~(HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN1_MUTE |
+ HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN2_MUTE |
+ HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN1_MUTE |
+ HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN2_MUTE);
+ hi6210_write_reg(i2s, HII2S_MUX_TOP_MODULE_CFG, val);
+
+
+ switch (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ i2s->master = false;
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val |= HII2S_I2S_CFG__S2_MST_SLV;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->master = true;
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~HII2S_I2S_CFG__S2_MST_SLV;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid i2s->fmt MASTER_MASK. This shouldn't happen\n");
+ }
+
+ switch (i2s->format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ fmt = HII2S_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ fmt = HII2S_FORMAT_LEFT_JUST;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ fmt = HII2S_FORMAT_RIGHT_JUST;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid i2s->fmt FORMAT_MASK. This shouldn't happen\n");
+ }
+
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~(HII2S_I2S_CFG__S2_FUNC_MODE_MASK <<
+ HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT);
+ val |= fmt << HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+
+
+ val = hi6210_read_reg(i2s, HII2S_CLK_SEL);
+ val &= ~(HII2S_CLK_SEL__I2S_BT_FM_SEL | /* BT gets the I2S */
+ HII2S_CLK_SEL__EXT_12_288MHZ_SEL);
+ hi6210_write_reg(i2s, HII2S_CLK_SEL, val);
+
+ dma_data->maxburst = 2;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_data->addr = i2s->base_phys + HII2S_ST_DL_CHANNEL;
+ else
+ dma_data->addr = i2s->base_phys + HII2S_STEREO_UPLINK_CHANNEL;
+
+ switch (i2s->channels) {
+ case 1:
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val |= HII2S_I2S_CFG__S2_FRAME_MODE;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ break;
+ default:
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~HII2S_I2S_CFG__S2_FRAME_MODE;
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+ }
+
+ /* clear loopback, set signed type and word length */
+ val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
+ val &= ~HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
+ val &= ~(HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_MASK <<
+ HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT);
+ val &= ~(HII2S_I2S_CFG__S2_DIRECT_LOOP_MASK <<
+ HII2S_I2S_CFG__S2_DIRECT_LOOP_SHIFT);
+ val |= signed_data;
+ val |= (bits << HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT);
+ hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
+
+
+ if (!i2s->master)
+ return 0;
+
+ /* set DAC and related units to correct rate */
+ val = hi6210_read_reg(i2s, HII2S_FS_CFG);
+ val &= ~(HII2S_FS_CFG__FS_S2_MASK << HII2S_FS_CFG__FS_S2_SHIFT);
+ val &= ~(HII2S_FS_CFG__FS_DACLR_MASK << HII2S_FS_CFG__FS_DACLR_SHIFT);
+ val &= ~(HII2S_FS_CFG__FS_ST_DL_R_MASK <<
+ HII2S_FS_CFG__FS_ST_DL_R_SHIFT);
+ val &= ~(HII2S_FS_CFG__FS_ST_DL_L_MASK <<
+ HII2S_FS_CFG__FS_ST_DL_L_SHIFT);
+ val |= (rate << HII2S_FS_CFG__FS_S2_SHIFT);
+ val |= (rate << HII2S_FS_CFG__FS_DACLR_SHIFT);
+ val |= (rate << HII2S_FS_CFG__FS_ST_DL_R_SHIFT);
+ val |= (rate << HII2S_FS_CFG__FS_ST_DL_L_SHIFT);
+ hi6210_write_reg(i2s, HII2S_FS_CFG, val);
+
+ return 0;
+}
+
+static int hi6210_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+{
+ pr_debug("%s\n", __func__);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ hi6210_i2s_rxctrl(cpu_dai, 1);
+ else
+ hi6210_i2s_txctrl(cpu_dai, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ hi6210_i2s_rxctrl(cpu_dai, 0);
+ else
+ hi6210_i2s_txctrl(cpu_dai, 0);
+ break;
+ default:
+ dev_err(cpu_dai->dev, "unknown cmd\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hi6210_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct hi6210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ snd_soc_dai_init_dma_data(dai,
+ &i2s->dma_data[SNDRV_PCM_STREAM_PLAYBACK],
+ &i2s->dma_data[SNDRV_PCM_STREAM_CAPTURE]);
+
+ return 0;
+}
+
+
+static struct snd_soc_dai_ops hi6210_i2s_dai_ops = {
+ .trigger = hi6210_i2s_trigger,
+ .hw_params = hi6210_i2s_hw_params,
+ .set_fmt = hi6210_i2s_set_fmt,
+ .startup = hi6210_i2s_startup,
+ .shutdown = hi6210_i2s_shutdown,
+};
+
+struct snd_soc_dai_driver hi6210_i2s_dai_init = {
+ .name = "hi6210_i2s",
+ .probe = hi6210_i2s_dai_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ },
+ .ops = &hi6210_i2s_dai_ops,
+};
+
+static const struct snd_soc_component_driver hi6210_i2s_i2s_comp = {
+ .name = "hi6210_i2s-i2s",
+};
+
+static int hi6210_i2s_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct hi6210_i2s *i2s;
+ struct resource *res;
+ int ret;
+
+ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s)
+ return -ENOMEM;
+
+ i2s->dev = dev;
+ spin_lock_init(&i2s->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2s->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(i2s->base))
+ return PTR_ERR(i2s->base);
+
+ i2s->base_phys = (phys_addr_t)res->start;
+ i2s->dai = hi6210_i2s_dai_init;
+ dev_set_drvdata(&pdev->dev, i2s);
+
+ i2s->sysctrl = syscon_regmap_lookup_by_phandle(node,
+ "hisilicon,sysctrl-syscon");
+ if (IS_ERR(i2s->sysctrl))
+ return PTR_ERR(i2s->sysctrl);
+
+ i2s->clk[CLK_DACODEC] = devm_clk_get(&pdev->dev, "dacodec");
+ if (IS_ERR_OR_NULL(i2s->clk[CLK_DACODEC]))
+ return PTR_ERR(i2s->clk[CLK_DACODEC]);
+ i2s->clocks++;
+
+ i2s->clk[CLK_I2S_BASE] = devm_clk_get(&pdev->dev, "i2s-base");
+ if (IS_ERR_OR_NULL(i2s->clk[CLK_I2S_BASE]))
+ return PTR_ERR(i2s->clk[CLK_I2S_BASE]);
+ i2s->clocks++;
+
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_register_component(&pdev->dev, &hi6210_i2s_i2s_comp,
+ &i2s->dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register dai\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hi6210_i2s_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_component(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id hi6210_i2s_dt_ids[] = {
+ { .compatible = "hisilicon,hi6210-i2s" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, hi6210_i2s_dt_ids);
+
+static struct platform_driver hi6210_i2s_driver = {
+ .probe = hi6210_i2s_probe,
+ .remove = hi6210_i2s_remove,
+ .driver = {
+ .name = "hi6210_i2s",
+ .of_match_table = hi6210_i2s_dt_ids,
+ },
+};
+
+module_platform_driver(hi6210_i2s_driver);
+
+MODULE_DESCRIPTION("Hisilicon HI6210 I2S driver");
+MODULE_AUTHOR("Andy Green <andy.green@linaro.org>");
+MODULE_LICENSE("GPL");
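hw_params() above repeats the same read-modify-write pattern for every multi-bit field, using the *_SHIFT/*_MASK pairs defined in hi6210-i2s.h below. A hedged helper sketch that captures the idiom; hi6210_update_field is a hypothetical name, not part of the patch:

static void hi6210_update_field(struct hi6210_i2s *i2s, int reg,
				unsigned int shift, u32 mask, u32 val)
{
	u32 tmp = hi6210_read_reg(i2s, reg);

	tmp &= ~(mask << shift);		/* clear the field */
	tmp |= (val & mask) << shift;		/* install the new value */
	hi6210_write_reg(i2s, reg, tmp);
}

/* e.g. set the S2 interface sample rate to 48 kHz:
 * hi6210_update_field(i2s, HII2S_FS_CFG, HII2S_FS_CFG__FS_S2_SHIFT,
 *		       HII2S_FS_CFG__FS_S2_MASK, HII2S_FS_RATE_48KHZ);
 */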
diff --git a/sound/soc/hisilicon/hi6210-i2s.h b/sound/soc/hisilicon/hi6210-i2s.h
new file mode 100644
index 000000000000..85cecc4939a0
--- /dev/null
+++ b/sound/soc/hisilicon/hi6210-i2s.h
@@ -0,0 +1,276 @@
+/*
+ * linux/sound/soc/hisilicon/hi6210-i2s.h
+ *
+ * Copyright (C) 2015 Linaro, Ltd
+ * Author: Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Note at least on 6220, S2 == BT, S1 == Digital FM Radio IF
+ */
+
+#ifndef _HI6210_I2S_H
+#define _HI6210_I2S_H
+
+#define HII2S_SW_RST_N 0
+
+#define HII2S_SW_RST_N__STEREO_UPLINK_WORDLEN_SHIFT 28
+#define HII2S_SW_RST_N__STEREO_UPLINK_WORDLEN_MASK 3
+#define HII2S_SW_RST_N__THIRDMD_UPLINK_WORDLEN_SHIFT 26
+#define HII2S_SW_RST_N__THIRDMD_UPLINK_WORDLEN_MASK 3
+#define HII2S_SW_RST_N__VOICE_UPLINK_WORDLEN_SHIFT 24
+#define HII2S_SW_RST_N__VOICE_UPLINK_WORDLEN_MASK 3
+#define HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT 20
+#define HII2S_SW_RST_N__ST_DL_WORDLEN_MASK 3
+#define HII2S_SW_RST_N__THIRDMD_DLINK_WORDLEN_SHIFT 18
+#define HII2S_SW_RST_N__THIRDMD_DLINK_WORDLEN_MASK 3
+#define HII2S_SW_RST_N__VOICE_DLINK_WORDLEN_SHIFT 16
+#define HII2S_SW_RST_N__VOICE_DLINK_WORDLEN_MASK 3
+
+#define HII2S_SW_RST_N__SW_RST_N BIT(0)
+
+enum hi6210_bits {
+ HII2S_BITS_16,
+ HII2S_BITS_18,
+ HII2S_BITS_20,
+ HII2S_BITS_24,
+};
+
+
+#define HII2S_IF_CLK_EN_CFG 4
+
+#define HII2S_IF_CLK_EN_CFG__THIRDMD_UPLINK_EN BIT(25)
+#define HII2S_IF_CLK_EN_CFG__THIRDMD_DLINK_EN BIT(24)
+#define HII2S_IF_CLK_EN_CFG__S3_IF_CLK_EN BIT(20)
+#define HII2S_IF_CLK_EN_CFG__S2_IF_CLK_EN BIT(16)
+#define HII2S_IF_CLK_EN_CFG__S2_OL_MIXER_EN BIT(15)
+#define HII2S_IF_CLK_EN_CFG__S2_OL_SRC_EN BIT(14)
+#define HII2S_IF_CLK_EN_CFG__S2_IR_PGA_EN BIT(13)
+#define HII2S_IF_CLK_EN_CFG__S2_IL_PGA_EN BIT(12)
+#define HII2S_IF_CLK_EN_CFG__S1_IR_PGA_EN BIT(10)
+#define HII2S_IF_CLK_EN_CFG__S1_IL_PGA_EN BIT(9)
+#define HII2S_IF_CLK_EN_CFG__S1_IF_CLK_EN BIT(8)
+#define HII2S_IF_CLK_EN_CFG__VOICE_DLINK_SRC_EN BIT(7)
+#define HII2S_IF_CLK_EN_CFG__VOICE_DLINK_EN BIT(6)
+#define HII2S_IF_CLK_EN_CFG__ST_DL_R_EN BIT(5)
+#define HII2S_IF_CLK_EN_CFG__ST_DL_L_EN BIT(4)
+#define HII2S_IF_CLK_EN_CFG__VOICE_UPLINK_R_EN BIT(3)
+#define HII2S_IF_CLK_EN_CFG__VOICE_UPLINK_L_EN BIT(2)
+#define HII2S_IF_CLK_EN_CFG__STEREO_UPLINK_R_EN BIT(1)
+#define HII2S_IF_CLK_EN_CFG__STEREO_UPLINK_L_EN BIT(0)
+
+#define HII2S_DIG_FILTER_CLK_EN_CFG 8
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_SDM_EN BIT(30)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_HBF2I_EN BIT(28)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_MIXER_EN BIT(25)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_AGC_EN BIT(24)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_SDM_EN BIT(22)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_HBF2I_EN BIT(20)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_MIXER_EN BIT(17)
+#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_AGC_EN BIT(16)
+
+#define HII2S_FS_CFG 0xc
+
+#define HII2S_FS_CFG__FS_S2_SHIFT 28
+#define HII2S_FS_CFG__FS_S2_MASK 7
+#define HII2S_FS_CFG__FS_S1_SHIFT 24
+#define HII2S_FS_CFG__FS_S1_MASK 7
+#define HII2S_FS_CFG__FS_ADCLR_SHIFT 20
+#define HII2S_FS_CFG__FS_ADCLR_MASK 7
+#define HII2S_FS_CFG__FS_DACLR_SHIFT 16
+#define HII2S_FS_CFG__FS_DACLR_MASK 7
+#define HII2S_FS_CFG__FS_ST_DL_R_SHIFT 8
+#define HII2S_FS_CFG__FS_ST_DL_R_MASK 7
+#define HII2S_FS_CFG__FS_ST_DL_L_SHIFT 4
+#define HII2S_FS_CFG__FS_ST_DL_L_MASK 7
+#define HII2S_FS_CFG__FS_VOICE_DLINK_SHIFT 0
+#define HII2S_FS_CFG__FS_VOICE_DLINK_MASK 7
+
+enum hi6210_i2s_rates {
+ HII2S_FS_RATE_8KHZ = 0,
+ HII2S_FS_RATE_16KHZ = 1,
+ HII2S_FS_RATE_32KHZ = 2,
+ HII2S_FS_RATE_48KHZ = 4,
+ HII2S_FS_RATE_96KHZ = 5,
+ HII2S_FS_RATE_192KHZ = 6,
+};
+
+#define HII2S_I2S_CFG 0x10
+
+#define HII2S_I2S_CFG__S2_IF_TX_EN BIT(31)
+#define HII2S_I2S_CFG__S2_IF_RX_EN BIT(30)
+#define HII2S_I2S_CFG__S2_FRAME_MODE BIT(29)
+#define HII2S_I2S_CFG__S2_MST_SLV BIT(28)
+#define HII2S_I2S_CFG__S2_LRCK_MODE BIT(27)
+#define HII2S_I2S_CFG__S2_CHNNL_MODE BIT(26)
+#define HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT 24
+#define HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_MASK 3
+#define HII2S_I2S_CFG__S2_DIRECT_LOOP_SHIFT 22
+#define HII2S_I2S_CFG__S2_DIRECT_LOOP_MASK 3
+#define HII2S_I2S_CFG__S2_TX_CLK_SEL BIT(21)
+#define HII2S_I2S_CFG__S2_RX_CLK_SEL BIT(20)
+#define HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT BIT(19)
+#define HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT 16
+#define HII2S_I2S_CFG__S2_FUNC_MODE_MASK 7
+#define HII2S_I2S_CFG__S1_IF_TX_EN BIT(15)
+#define HII2S_I2S_CFG__S1_IF_RX_EN BIT(14)
+#define HII2S_I2S_CFG__S1_FRAME_MODE BIT(13)
+#define HII2S_I2S_CFG__S1_MST_SLV BIT(12)
+#define HII2S_I2S_CFG__S1_LRCK_MODE BIT(11)
+#define HII2S_I2S_CFG__S1_CHNNL_MODE BIT(10)
+#define HII2S_I2S_CFG__S1_CODEC_IO_WORDLENGTH_SHIFT 8
+#define HII2S_I2S_CFG__S1_CODEC_IO_WORDLENGTH_MASK 3
+#define HII2S_I2S_CFG__S1_DIRECT_LOOP_SHIFT 6
+#define HII2S_I2S_CFG__S1_DIRECT_LOOP_MASK 3
+#define HII2S_I2S_CFG__S1_TX_CLK_SEL BIT(5)
+#define HII2S_I2S_CFG__S1_RX_CLK_SEL BIT(4)
+#define HII2S_I2S_CFG__S1_CODEC_DATA_FORMAT BIT(3)
+#define HII2S_I2S_CFG__S1_FUNC_MODE_SHIFT 0
+#define HII2S_I2S_CFG__S1_FUNC_MODE_MASK 7
+
+enum hi6210_i2s_formats {
+ HII2S_FORMAT_I2S,
+ HII2S_FORMAT_PCM_STD,
+ HII2S_FORMAT_PCM_USER,
+ HII2S_FORMAT_LEFT_JUST,
+ HII2S_FORMAT_RIGHT_JUST,
+};
+
+#define HII2S_DIG_FILTER_MODULE_CFG 0x14
+
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_GAIN_SHIFT 28
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_GAIN_MASK 3
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN4_MUTE BIT(27)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN3_MUTE BIT(26)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN2_MUTE BIT(25)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN1_MUTE BIT(24)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_GAIN_SHIFT 20
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_GAIN_MASK 3
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN4_MUTE BIT(19)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN3_MUTE BIT(18)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN2_MUTE BIT(17)
+#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN1_MUTE BIT(16)
+#define HII2S_DIG_FILTER_MODULE_CFG__SW_DACR_SDM_DITHER BIT(9)
+#define HII2S_DIG_FILTER_MODULE_CFG__SW_DACL_SDM_DITHER BIT(8)
+#define HII2S_DIG_FILTER_MODULE_CFG__LM_CODEC_DAC2ADC_SHIFT 4
+#define HII2S_DIG_FILTER_MODULE_CFG__LM_CODEC_DAC2ADC_MASK 7
+#define HII2S_DIG_FILTER_MODULE_CFG__RM_CODEC_DAC2ADC_SHIFT 0
+#define HII2S_DIG_FILTER_MODULE_CFG__RM_CODEC_DAC2ADC_MASK 7
+
+enum hi6210_gains {
+ HII2S_GAIN_100PC,
+ HII2S_GAIN_50PC,
+ HII2S_GAIN_25PC,
+};
+
+#define HII2S_MUX_TOP_MODULE_CFG 0x18
+
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_GAIN_SHIFT 14
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_GAIN_MASK 3
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN2_MUTE BIT(13)
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN1_MUTE BIT(12)
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_GAIN_SHIFT 10
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_GAIN_MASK 3
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN2_MUTE BIT(9)
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN1_MUTE BIT(8)
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_RDY BIT(6)
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_MODE_SHIFT 4
+#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_MODE_MASK 3
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_RDY BIT(3)
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_MODE_SHIFT 0
+#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_MODE_MASK 7
+
+enum hi6210_s2_src_mode {
+ HII2S_S2_SRC_MODE_3,
+ HII2S_S2_SRC_MODE_12,
+ HII2S_S2_SRC_MODE_6,
+ HII2S_S2_SRC_MODE_2,
+};
+
+enum hi6210_voice_dlink_src_mode {
+ HII2S_VOICE_DL_SRC_MODE_12 = 1,
+ HII2S_VOICE_DL_SRC_MODE_6,
+ HII2S_VOICE_DL_SRC_MODE_2,
+ HII2S_VOICE_DL_SRC_MODE_3,
+};
+
+#define HII2S_ADC_PGA_CFG 0x1c
+#define HII2S_S1_INPUT_PGA_CFG 0x20
+#define HII2S_S2_INPUT_PGA_CFG 0x24
+#define HII2S_ST_DL_PGA_CFG 0x28
+#define HII2S_VOICE_SIDETONE_DLINK_PGA_CFG 0x2c
+#define HII2S_APB_AFIFO_CFG_1 0x30
+#define HII2S_APB_AFIFO_CFG_2 0x34
+#define HII2S_ST_DL_FIFO_TH_CFG 0x38
+
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT 24
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_MASK 0x1f
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT 16
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_MASK 0x1f
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT 8
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_MASK 0x1f
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT 0
+#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_MASK 0x1f
+
+#define HII2S_STEREO_UPLINK_FIFO_TH_CFG 0x3c
+#define HII2S_VOICE_UPLINK_FIFO_TH_CFG 0x40
+#define HII2S_CODEC_IRQ_MASK 0x44
+#define HII2S_CODEC_IRQ 0x48
+#define HII2S_DACL_AGC_CFG_1 0x4c
+#define HII2S_DACL_AGC_CFG_2 0x50
+#define HII2S_DACR_AGC_CFG_1 0x54
+#define HII2S_DACR_AGC_CFG_2 0x58
+#define HII2S_DMIC_SIF_CFG 0x5c
+#define HII2S_MISC_CFG 0x60
+
+#define HII2S_MISC_CFG__THIRDMD_DLINK_TEST_SEL BIT(17)
+#define HII2S_MISC_CFG__THIRDMD_DLINK_DIN_SEL BIT(16)
+#define HII2S_MISC_CFG__S3_DOUT_RIGHT_SEL BIT(14)
+#define HII2S_MISC_CFG__S3_DOUT_LEFT_SEL BIT(13)
+#define HII2S_MISC_CFG__S3_DIN_TEST_SEL BIT(12)
+#define HII2S_MISC_CFG__VOICE_DLINK_SRC_UP_DOUT_VLD_SEL BIT(8)
+#define HII2S_MISC_CFG__VOICE_DLINK_TEST_SEL BIT(7)
+#define HII2S_MISC_CFG__VOICE_DLINK_DIN_SEL BIT(6)
+#define HII2S_MISC_CFG__ST_DL_TEST_SEL BIT(4)
+#define HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL BIT(3)
+#define HII2S_MISC_CFG__S2_DOUT_TEST_SEL BIT(2)
+#define HII2S_MISC_CFG__S1_DOUT_TEST_SEL BIT(1)
+#define HII2S_MISC_CFG__S2_DOUT_LEFT_SEL BIT(0)
+
+#define HII2S_S2_SRC_CFG 0x64
+#define HII2S_MEM_CFG 0x68
+#define HII2S_THIRDMD_PCM_PGA_CFG 0x6c
+#define HII2S_THIRD_MODEM_FIFO_TH 0x70
+#define HII2S_S3_ANTI_FREQ_JITTER_TX_INC_CNT 0x74
+#define HII2S_S3_ANTI_FREQ_JITTER_TX_DEC_CNT 0x78
+#define HII2S_S3_ANTI_FREQ_JITTER_RX_INC_CNT 0x7c
+#define HII2S_S3_ANTI_FREQ_JITTER_RX_DEC_CNT 0x80
+#define HII2S_ANTI_FREQ_JITTER_EN 0x84
+#define HII2S_CLK_SEL 0x88
+
+/* 0 = BT owns the i2s */
+#define HII2S_CLK_SEL__I2S_BT_FM_SEL BIT(0)
+/* 0 = internal source, 1 = ext */
+#define HII2S_CLK_SEL__EXT_12_288MHZ_SEL BIT(1)
+
+
+#define HII2S_THIRDMD_DLINK_CHANNEL 0xe8
+#define HII2S_THIRDMD_ULINK_CHANNEL 0xec
+#define HII2S_VOICE_DLINK_CHANNEL 0xf0
+
+/* shovel data in here for playback */
+#define HII2S_ST_DL_CHANNEL 0xf4
+#define HII2S_STEREO_UPLINK_CHANNEL 0xf8
+#define HII2S_VOICE_UPLINK_CHANNEL 0xfc
+
+#endif /* _HI6210_I2S_H */
diff --git a/sound/soc/hisilicon/hisi-i2s.c b/sound/soc/hisilicon/hisi-i2s.c
new file mode 100644
index 000000000000..4c74a6cca99c
--- /dev/null
+++ b/sound/soc/hisilicon/hisi-i2s.c
@@ -0,0 +1,435 @@
+/*
+ * linux/sound/soc/hisilicon/hisi-i2s.c - I2S IP driver
+ *
+ * Copyright (C) 2015 Linaro, Ltd
+ * Author: Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver only handles the S2 interface (BT)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/reset-controller.h>
+#include <linux/regulator/consumer.h>
+
+#include "hisi-i2s.h"
+
+struct hisi_i2s {
+ struct device *dev;
+ struct reset_control *rc;
+ int clocks;
+ struct regulator *regu_asp;
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pin_default;
+ struct pinctrl_state *pin_idle;
+ struct clk *asp_subsys_clk;
+ struct snd_soc_dai_driver dai;
+ void __iomem *base;
+ void __iomem *base_syscon;
+ phys_addr_t base_phys;
+ struct snd_dmaengine_dai_dma_data dma_data[2];
+ spinlock_t lock;
+ int rate;
+ int format;
+ int bits;
+ int channels;
+ u32 master;
+ u32 status;
+};
+
+static void hisi_bits(struct hisi_i2s *i2s, u32 ofs, u32 reset, u32 set)
+{
+ u32 val = readl(i2s->base + ofs) & ~reset;
+
+ writel(val | set, i2s->base + ofs);
+}
+
+static void hisi_syscon_bits(struct hisi_i2s *i2s, u32 ofs, u32 reset, u32 set)
+{
+ u32 val = readl(i2s->base_syscon + ofs) & ~reset;
+
+ writel(val | set, i2s->base_syscon + ofs);
+}
+
+static int _hisi_i2s_set_fmt(struct hisi_i2s *i2s,
+ struct snd_pcm_substream *substream)
+{
+ switch (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ i2s->master = false;
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK_SEL_REG, 0, HI_ASP_CFG_R_CLK_SEL_EN);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->master = true;
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK_SEL_REG, HI_ASP_CFG_R_CLK_SEL_EN, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hisi_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ /* deassert reset on sio_bt */
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_RST_CTRLDIS_REG, 0, BIT(2) | BIT(6) | BIT(8) | BIT(16));
+
+ /* enable clk before frequency division */
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_GATE_EN_REG, 0, BIT(5) | BIT(6));
+
+ /* enable frequency division */
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_GATE_CLKDIV_EN_REG, 0, BIT(2) | BIT(5));
+
+ /* select clk */
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK_SEL_REG, HI_ASP_MASK, HI_ASP_CFG_R_CLK_SEL);
+
+ /* select clk_div */
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK1_DIV_REG, HI_ASP_MASK, HI_ASP_CFG_R_CLK1_DIV_SEL);
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK4_DIV_REG, HI_ASP_MASK, HI_ASP_CFG_R_CLK4_DIV_SEL);
+ hisi_syscon_bits(i2s, HI_ASP_CFG_R_CLK6_DIV_REG, HI_ASP_MASK, HI_ASP_CFG_R_CLK6_DIV_SEL);
+
+ /* sio config */
+ hisi_bits(i2s, HI_ASP_SIO_MODE_REG, HI_ASP_MASK, 0x0);
+ hisi_bits(i2s, HI_ASP_SIO_DATA_WIDTH_SET_REG, HI_ASP_MASK, 0x9);
+ hisi_bits(i2s, HI_ASP_SIO_I2S_POS_MERGE_EN_REG, HI_ASP_MASK, 0x1);
+ hisi_bits(i2s, HI_ASP_SIO_I2S_START_POS_REG, HI_ASP_MASK, 0x0);
+
+ return 0;
+}
+
+void hisi_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ if (!IS_ERR_OR_NULL(i2s->asp_subsys_clk))
+ clk_disable_unprepare(i2s->asp_subsys_clk);
+}
+
+static void hisi_i2s_txctrl(struct snd_soc_dai *cpu_dai, int on)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ spin_lock(&i2s->lock);
+
+ if (on) {
+ /* enable SIO TX */
+ hisi_bits(i2s, HI_ASP_SIO_CT_SET_REG, 0,
+ HI_ASP_SIO_TX_ENABLE | HI_ASP_SIO_TX_DATA_MERGE | HI_ASP_SIO_TX_FIFO_THRESHOLD |
+ HI_ASP_SIO_RX_ENABLE | HI_ASP_SIO_RX_DATA_MERGE | HI_ASP_SIO_RX_FIFO_THRESHOLD);
+ } else
+ /* disable SIO TX */
+ hisi_bits(i2s, HI_ASP_SIO_CT_CLR_REG, 0, HI_ASP_SIO_TX_ENABLE | HI_ASP_SIO_RX_ENABLE);
+ spin_unlock(&i2s->lock);
+}
+
+static void hisi_i2s_rxctrl(struct snd_soc_dai *cpu_dai, int on)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ spin_lock(&i2s->lock);
+ if (on)
+ /* enable SIO RX */
+ hisi_bits(i2s, HI_ASP_SIO_CT_SET_REG, 0,
+ HI_ASP_SIO_TX_ENABLE | HI_ASP_SIO_TX_DATA_MERGE | HI_ASP_SIO_TX_FIFO_THRESHOLD |
+ HI_ASP_SIO_RX_ENABLE | HI_ASP_SIO_RX_DATA_MERGE | HI_ASP_SIO_RX_FIFO_THRESHOLD);
+ else
+ /* disable SIO RX */
+ hisi_bits(i2s, HI_ASP_SIO_CT_CLR_REG, 0, HI_ASP_SIO_TX_ENABLE | HI_ASP_SIO_RX_ENABLE);
+ spin_unlock(&i2s->lock);
+}
+
+static int hisi_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ return 0;
+}
+
+static int hisi_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+
+ i2s->format = fmt;
+ i2s->master = (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) ==
+ SND_SOC_DAIFMT_CBS_CFS;
+
+ return 0;
+}
+
+static int hisi_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
+ struct snd_dmaengine_dai_dma_data *dma_data;
+
+ dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+ _hisi_i2s_set_fmt(i2s, substream);
+
+ dma_data->maxburst = 4;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_data->addr = i2s->base_phys + HI_ASP_SIO_I2S_DUAL_TX_CHN_REG;
+ else
+ dma_data->addr = i2s->base_phys + HI_ASP_SIO_I2S_DUAL_RX_CHN_REG;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_U16_LE:
+ case SNDRV_PCM_FORMAT_S16_LE:
+ i2s->bits = 16;
+ dma_data->addr_width = 4;
+ break;
+
+ case SNDRV_PCM_FORMAT_U24_LE:
+ case SNDRV_PCM_FORMAT_S24_LE:
+ i2s->bits = 32;
+ dma_data->addr_width = 4;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Bad format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hisi_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+{
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ hisi_i2s_rxctrl(cpu_dai, 1);
+ else
+ hisi_i2s_txctrl(cpu_dai, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ hisi_i2s_rxctrl(cpu_dai, 0);
+ else
+ hisi_i2s_txctrl(cpu_dai, 0);
+ break;
+ default:
+ dev_err(cpu_dai->dev, "unknown cmd\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hisi_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct hisi_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ snd_soc_dai_init_dma_data(dai,
+ &i2s->dma_data[SNDRV_PCM_STREAM_PLAYBACK],
+ &i2s->dma_data[SNDRV_PCM_STREAM_CAPTURE]);
+
+ return 0;
+}
+
+
+static struct snd_soc_dai_ops hisi_i2s_dai_ops = {
+ .trigger = hisi_i2s_trigger,
+ .hw_params = hisi_i2s_hw_params,
+ .set_fmt = hisi_i2s_set_fmt,
+ .set_sysclk = hisi_i2s_set_sysclk,
+ .startup = hisi_i2s_startup,
+ .shutdown = hisi_i2s_shutdown,
+};
+
+struct snd_soc_dai_driver hisi_i2s_dai_init = {
+ .name = "hisi_i2s",
+ .probe = hisi_i2s_dai_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ },
+ .ops = &hisi_i2s_dai_ops,
+};
+
+static const struct snd_soc_component_driver hisi_i2s_i2s_comp = {
+ .name = "hisi_i2s-i2s",
+};
+
+static const struct snd_pcm_hardware snd_hisi_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_HALF_DUPLEX,
+ .period_bytes_min = 4096,
+ .period_bytes_max = 4096,
+ .periods_min = 4,
+ .periods_max = UINT_MAX,
+ .buffer_bytes_max = SIZE_MAX,
+};
+
+static const struct snd_dmaengine_pcm_config hisi_dmaengine_pcm_config = {
+ .pcm_hardware = &snd_hisi_hardware,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+ .prealloc_buffer_size = 64 * 1024,
+};
+
+static int hisi_i2s_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_i2s *i2s;
+ struct resource *res;
+ int ret;
+
+ i2s = devm_kzalloc(dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s)
+ return -ENOMEM;
+
+ i2s->dev = dev;
+ spin_lock_init(&i2s->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ return ret;
+ }
+ i2s->base_phys = (phys_addr_t)res->start;
+
+ i2s->dai = hisi_i2s_dai_init;
+ dev_set_drvdata(&pdev->dev, i2s);
+
+ i2s->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(i2s->base)) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = PTR_ERR(i2s->base);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ ret = -ENODEV;
+ return ret;
+ }
+ i2s->base_syscon = devm_ioremap(dev, res->start, resource_size(res));
+ if (IS_ERR(i2s->base_syscon)) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = PTR_ERR(i2s->base_syscon);
+ return ret;
+ }
+
+ /* i2s iomux config */
+ i2s->pctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(i2s->pctrl)) {
+ dev_err(dev, "could not get pinctrl\n");
+ ret = -EIO;
+ return ret;
+ }
+
+ i2s->pin_default = pinctrl_lookup_state(i2s->pctrl, PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(i2s->pin_default)) {
+ dev_err(dev, "could not get default state (%li)\n", PTR_ERR(i2s->pin_default));
+ ret = -EIO;
+ return ret;
+ }
+
+ if (pinctrl_select_state(i2s->pctrl, i2s->pin_default)) {
+ dev_err(dev, "could not set pins to default state\n");
+ ret = -EIO;
+ return ret;
+ }
+
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &hisi_dmaengine_pcm_config,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_register_component(&pdev->dev, &hisi_i2s_i2s_comp,
+ &i2s->dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register dai\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "Registered as %s\n", i2s->dai.name);
+
+ return 0;
+}
+
+static int hisi_i2s_remove(struct platform_device *pdev)
+{
+ struct hisi_i2s *i2s = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_component(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ devm_pinctrl_put(i2s->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_i2s_dt_ids[] = {
+ { .compatible = "hisilicon,hisi-i2s" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, hisi_i2s_dt_ids);
+
+static struct platform_driver hisi_i2s_driver = {
+ .probe = hisi_i2s_probe,
+ .remove = hisi_i2s_remove,
+ .driver = {
+ .name = "hisi_i2s",
+ .owner = THIS_MODULE,
+ .of_match_table = hisi_i2s_dt_ids,
+ },
+};
+
+module_platform_driver(hisi_i2s_driver);
+
+MODULE_DESCRIPTION("Hisilicon I2S driver");
+MODULE_AUTHOR("Andy Green <andy.green@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/hisilicon/hisi-i2s.h b/sound/soc/hisilicon/hisi-i2s.h
new file mode 100644
index 000000000000..7dc080113f11
--- /dev/null
+++ b/sound/soc/hisilicon/hisi-i2s.h
@@ -0,0 +1,109 @@
+/*
+ * linux/sound/soc/hisilicon/hisi-i2s.h
+ *
+ * Copyright (C) 2015 Linaro, Ltd
+ * Author: Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _HISI_I2S_H
+#define _HISI_I2S_H
+
+enum hisi_bits {
+ HII2S_BITS_16,
+ HII2S_BITS_18,
+ HII2S_BITS_20,
+ HII2S_BITS_24,
+};
+
+enum hisi_i2s_rates {
+ HII2S_FS_RATE_8KHZ = 0,
+ HII2S_FS_RATE_16KHZ = 1,
+ HII2S_FS_RATE_32KHZ = 2,
+ HII2S_FS_RATE_48KHZ = 4,
+ HII2S_FS_RATE_96KHZ = 5,
+ HII2S_FS_RATE_192KHZ = 6,
+};
+
+#define HI_ASP_CFG_R_RST_CTRLEN_REG 0x0
+#define HI_ASP_CFG_R_RST_CTRLDIS_REG 0x4
+#define HI_ASP_CFG_R_GATE_EN_REG 0xC
+#define HI_ASP_CFG_R_GATE_DIS_REG 0x10
+#define HI_ASP_CFG_R_GATE_CLKEN_REG 0x14
+#define HI_ASP_CFG_R_GATE_CLKSTAT_REG 0x18
+#define HI_ASP_CFG_R_GATE_CLKDIV_EN_REG 0x1C
+#define HI_ASP_CFG_R_CLK1_DIV_REG 0x20
+#define HI_ASP_CFG_R_CLK2_DIV_REG 0x24
+#define HI_ASP_CFG_R_CLK3_DIV_REG 0x28
+#define HI_ASP_CFG_R_CLK4_DIV_REG 0x2C
+#define HI_ASP_CFG_R_CLK5_DIV_REG 0x30
+#define HI_ASP_CFG_R_CLK6_DIV_REG 0x34
+#define HI_ASP_CFG_R_CLK_SEL_REG 0x38
+#define HI_ASP_CFG_R_SEC_REG 0x100
+
+
+#define HI_ASP_SIO_VERSION_REG (0x3C)
+#define HI_ASP_SIO_MODE_REG (0x40)
+#define HI_ASP_SIO_INTSTATUS_REG (0x44)
+#define HI_ASP_SIO_INTCLR_REG (0x48)
+#define HI_ASP_SIO_I2S_LEFT_XD_REG (0x4C)
+#define HI_ASP_SIO_I2S_RIGHT_XD_REG (0x50)
+#define HI_ASP_SIO_I2S_LEFT_RD_REG (0x54)
+#define HI_ASP_SIO_I2S_RIGHT_RD_REG (0x58)
+#define HI_ASP_SIO_CT_SET_REG (0x5C)
+#define HI_ASP_SIO_CT_CLR_REG (0x60)
+#define HI_ASP_SIO_RX_STA_REG (0x68)
+#define HI_ASP_SIO_TX_STA_REG (0x6C)
+#define HI_ASP_SIO_DATA_WIDTH_SET_REG (0x78)
+#define HI_ASP_SIO_I2S_START_POS_REG (0x7C)
+#define HI_ASP_SIO_I2S_POS_FLAG_REG (0x80)
+#define HI_ASP_SIO_SIGNED_EXT_REG (0x84)
+#define HI_ASP_SIO_I2S_POS_MERGE_EN_REG (0x88)
+#define HI_ASP_SIO_INTMASK_REG (0x8C)
+#define HI_ASP_SIO_I2S_DUAL_RX_CHN_REG (0xA0)
+#define HI_ASP_SIO_I2S_DUAL_TX_CHN_REG (0xC0)
+
+
+#define HI_ASP_CFG_R_CLK_SEL_EN BIT(2)
+#define HI_ASP_CFG_R_CLK_SEL 0x140010
+#define HI_ASP_CFG_R_CLK1_DIV_SEL 0xbcdc9a
+#define HI_ASP_CFG_R_CLK4_DIV_SEL 0x00ff000f
+#define HI_ASP_CFG_R_CLK6_DIV_SEL 0x00ff003f
+#define HI_ASP_CFG_SIO_MODE 0
+#define HI_ASP_SIO_MODE_SEL_EN BIT(0)
+#define HI_ASP_MASK 0xffffffff
+
+#define HI_ASP_SIO_RX_ENABLE BIT(13)
+#define HI_ASP_SIO_TX_ENABLE BIT(12)
+#define HI_ASP_SIO_RX_FIFO_DISABLE BIT(11)
+#define HI_ASP_SIO_TX_FIFO_DISABLE BIT(10)
+#define HI_ASP_SIO_RX_DATA_MERGE BIT(9)
+#define HI_ASP_SIO_TX_DATA_MERGE BIT(8)
+#define HI_ASP_SIO_RX_FIFO_THRESHOLD (0x5 << 4)
+#define HI_ASP_SIO_TX_FIFO_THRESHOLD (0xB << 0)
+#define HI_ASP_SIO_RX_FIFO_THRESHOLD_CLR (0xF << 4)
+#define HI_ASP_SIO_TX_FIFO_THRESHOLD_CLR (0xF << 0)
+#define HI_ASP_SIO_BURST (0x4)
+
+
+enum hisi_i2s_formats {
+ HII2S_FORMAT_I2S,
+ HII2S_FORMAT_PCM_STD,
+ HII2S_FORMAT_PCM_USER,
+ HII2S_FORMAT_LEFT_JUST,
+ HII2S_FORMAT_RIGHT_JUST,
+};
+
+#endif /* _HISI_I2S_H */
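The register helpers in hisi-i2s.c interpret these constants with clear-then-set semantics: the second argument to hisi_bits()/hisi_syscon_bits() is the mask of bits to clear, the third the bits to OR in, so passing HI_ASP_MASK overwrites the whole register. A hedged sketch of the two common call shapes; example_sio_enable is a hypothetical name, not part of the patch:

static void example_sio_enable(struct hisi_i2s *i2s)
{
	/* set-only update: enable TX and RX, leave the other bits alone */
	hisi_bits(i2s, HI_ASP_SIO_CT_SET_REG, 0,
		  HI_ASP_SIO_TX_ENABLE | HI_ASP_SIO_RX_ENABLE);

	/* full overwrite: clear everything, then write the driver's
	 * data-width value (0x9), as hisi_i2s_startup() does */
	hisi_bits(i2s, HI_ASP_SIO_DATA_WIDTH_SET_REG, HI_ASP_MASK, 0x9);
}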